title
stringlengths 4
168
| content
stringlengths 7
1.74M
| commands
sequencelengths 1
5.62k
⌀ | url
stringlengths 79
342
|
---|---|---|---|
Builds using BuildConfig | Builds using BuildConfig Red Hat OpenShift Service on AWS 4 Contains information about builds for Red Hat OpenShift Service on AWS Red Hat OpenShift Documentation Team | [
"kind: BuildConfig apiVersion: build.openshift.io/v1 metadata: name: \"ruby-sample-build\" 1 spec: runPolicy: \"Serial\" 2 triggers: 3 - type: \"GitHub\" github: secret: \"secret101\" - type: \"Generic\" generic: secret: \"secret101\" - type: \"ImageChange\" source: 4 git: uri: \"https://github.com/openshift/ruby-hello-world\" strategy: 5 sourceStrategy: from: kind: \"ImageStreamTag\" name: \"ruby-20-centos7:latest\" output: 6 to: kind: \"ImageStreamTag\" name: \"origin-ruby-sample:latest\" postCommit: 7 script: \"bundle exec rake test\"",
"source: git: uri: https://github.com/openshift/ruby-hello-world.git 1 ref: \"master\" images: - from: kind: ImageStreamTag name: myinputimage:latest namespace: mynamespace paths: - destinationDir: app/dir/injected/dir 2 sourcePath: /usr/lib/somefile.jar contextDir: \"app/dir\" 3 dockerfile: \"FROM centos:7\\nRUN yum install -y httpd\" 4",
"source: dockerfile: \"FROM centos:7\\nRUN yum install -y httpd\" 1",
"source: git: uri: https://github.com/openshift/ruby-hello-world.git ref: \"master\" images: 1 - from: 2 kind: ImageStreamTag name: myinputimage:latest namespace: mynamespace paths: 3 - destinationDir: injected/dir 4 sourcePath: /usr/lib/somefile.jar 5 - from: kind: ImageStreamTag name: myotherinputimage:latest namespace: myothernamespace pullSecret: mysecret 6 paths: - destinationDir: injected/dir sourcePath: /usr/lib/somefile.jar",
"oc secrets link builder dockerhub",
"source: git: 1 uri: \"https://github.com/openshift/ruby-hello-world\" ref: \"master\" contextDir: \"app/dir\" 2 dockerfile: \"FROM openshift/ruby-22-centos7\\nUSER example\" 3",
"source: git: uri: \"https://github.com/openshift/ruby-hello-world\" ref: \"master\" httpProxy: http://proxy.example.com httpsProxy: https://proxy.example.com noProxy: somedomain.com, otherdomain.com",
"oc annotate secret mysecret 'build.openshift.io/source-secret-match-uri-1=ssh://bitbucket.atlassian.com:7999/*'",
"kind: Secret apiVersion: v1 metadata: name: matches-all-corporate-servers-https-only annotations: build.openshift.io/source-secret-match-uri-1: https://*.mycorp.com/* data: --- kind: Secret apiVersion: v1 metadata: name: override-for-my-dev-servers-https-only annotations: build.openshift.io/source-secret-match-uri-1: https://mydev1.mycorp.com/* build.openshift.io/source-secret-match-uri-2: https://mydev2.mycorp.com/* data:",
"oc annotate secret mysecret 'build.openshift.io/source-secret-match-uri-1=https://*.mycorp.com/*'",
"apiVersion: \"build.openshift.io/v1\" kind: \"BuildConfig\" metadata: name: \"sample-build\" spec: output: to: kind: \"ImageStreamTag\" name: \"sample-image:latest\" source: git: uri: \"https://github.com/user/app.git\" sourceSecret: name: \"basicsecret\" strategy: sourceStrategy: from: kind: \"ImageStreamTag\" name: \"python-33-centos7:latest\"",
"oc set build-secret --source bc/sample-build basicsecret",
"oc create secret generic <secret_name> --from-file=<path/to/.gitconfig>",
"[http] sslVerify=false",
"cat .gitconfig",
"[user] name = <name> email = <email> [http] sslVerify = false sslCert = /var/run/secrets/openshift.io/source/client.crt sslKey = /var/run/secrets/openshift.io/source/client.key sslCaInfo = /var/run/secrets/openshift.io/source/cacert.crt",
"oc create secret generic <secret_name> --from-literal=username=<user_name> \\ 1 --from-literal=password=<password> \\ 2 --from-file=.gitconfig=.gitconfig --from-file=client.crt=/var/run/secrets/openshift.io/source/client.crt --from-file=cacert.crt=/var/run/secrets/openshift.io/source/cacert.crt --from-file=client.key=/var/run/secrets/openshift.io/source/client.key",
"oc create secret generic <secret_name> --from-literal=username=<user_name> --from-literal=password=<password> --type=kubernetes.io/basic-auth",
"oc create secret generic <secret_name> --from-literal=password=<token> --type=kubernetes.io/basic-auth",
"ssh-keygen -t ed25519 -C \"[email protected]\"",
"oc create secret generic <secret_name> --from-file=ssh-privatekey=<path/to/ssh/private/key> --from-file=<path/to/known_hosts> \\ 1 --type=kubernetes.io/ssh-auth",
"cat intermediateCA.crt intermediateCA.crt rootCA.crt > ca.crt",
"oc create secret generic mycert --from-file=ca.crt=</path/to/file> 1",
"oc create secret generic <secret_name> --from-file=ssh-privatekey=<path/to/ssh/private/key> --from-file=<path/to/.gitconfig> --type=kubernetes.io/ssh-auth",
"oc create secret generic <secret_name> --from-file=ca.crt=<path/to/certificate> --from-file=<path/to/.gitconfig>",
"oc create secret generic <secret_name> --from-literal=username=<user_name> --from-literal=password=<password> --from-file=ca-cert=</path/to/file> --type=kubernetes.io/basic-auth",
"oc create secret generic <secret_name> --from-literal=username=<user_name> --from-literal=password=<password> --from-file=</path/to/.gitconfig> --type=kubernetes.io/basic-auth",
"oc create secret generic <secret_name> --from-literal=username=<user_name> --from-literal=password=<password> --from-file=</path/to/.gitconfig> --from-file=ca-cert=</path/to/file> --type=kubernetes.io/basic-auth",
"apiVersion: v1 kind: Secret metadata: name: test-secret namespace: my-namespace type: Opaque 1 data: 2 username: <username> 3 password: <password> stringData: 4 hostname: myapp.mydomain.com 5",
"oc create -f <filename>",
"oc create secret generic dockerhub --from-file=.dockerconfigjson=<path/to/.docker/config.json> --type=kubernetes.io/dockerconfigjson",
"apiVersion: v1 kind: Secret metadata: name: mysecret type: Opaque 1 data: username: <username> password: <password>",
"apiVersion: v1 kind: Secret metadata: name: aregistrykey namespace: myapps type: kubernetes.io/dockerconfigjson 1 data: .dockerconfigjson:bm5ubm5ubm5ubm5ubm5ubm5ubm5ubmdnZ2dnZ2dnZ2dnZ2dnZ2dnZ2cgYXV0aCBrZXlzCg== 2",
"oc create -f <your_yaml_file>.yaml",
"oc logs secret-example-pod",
"oc delete pod secret-example-pod",
"apiVersion: v1 kind: Secret metadata: name: test-secret data: username: <username> 1 password: <password> 2 stringData: hostname: myapp.mydomain.com 3 secret.properties: |- 4 property1=valueA property2=valueB",
"apiVersion: v1 kind: Pod metadata: name: secret-example-pod spec: containers: - name: secret-test-container image: busybox command: [ \"/bin/sh\", \"-c\", \"cat /etc/secret-volume/*\" ] volumeMounts: # name must match the volume name below - name: secret-volume mountPath: /etc/secret-volume readOnly: true volumes: - name: secret-volume secret: secretName: test-secret restartPolicy: Never",
"apiVersion: v1 kind: Pod metadata: name: secret-example-pod spec: containers: - name: secret-test-container image: busybox command: [ \"/bin/sh\", \"-c\", \"export\" ] env: - name: TEST_SECRET_USERNAME_ENV_VAR valueFrom: secretKeyRef: name: test-secret key: username restartPolicy: Never",
"apiVersion: build.openshift.io/v1 kind: BuildConfig metadata: name: secret-example-bc spec: strategy: sourceStrategy: env: - name: TEST_SECRET_USERNAME_ENV_VAR valueFrom: secretKeyRef: name: test-secret key: username",
"oc create configmap settings-mvn --from-file=settings.xml=<path/to/settings.xml>",
"apiVersion: core/v1 kind: ConfigMap metadata: name: settings-mvn data: settings.xml: | <settings> ... # Insert maven settings here </settings>",
"oc create secret generic secret-mvn --from-file=ssh-privatekey=<path/to/.ssh/id_rsa> --type=kubernetes.io/ssh-auth",
"apiVersion: core/v1 kind: Secret metadata: name: secret-mvn type: kubernetes.io/ssh-auth data: ssh-privatekey: | # Insert ssh private key, base64 encoded",
"source: git: uri: https://github.com/wildfly/quickstart.git contextDir: helloworld configMaps: - configMap: name: settings-mvn secrets: - secret: name: secret-mvn",
"oc new-build openshift/wildfly-101-centos7~https://github.com/wildfly/quickstart.git --context-dir helloworld --build-secret \"secret-mvn\" --build-config-map \"settings-mvn\"",
"source: git: uri: https://github.com/wildfly/quickstart.git contextDir: helloworld configMaps: - configMap: name: settings-mvn destinationDir: \".m2\" secrets: - secret: name: secret-mvn destinationDir: \".ssh\"",
"oc new-build openshift/wildfly-101-centos7~https://github.com/wildfly/quickstart.git --context-dir helloworld --build-secret \"secret-mvn:.ssh\" --build-config-map \"settings-mvn:.m2\"",
"#!/bin/sh APP_VERSION=1.0 wget http://repository.example.com/app/app-USDAPP_VERSION.jar -O app.jar",
"#!/bin/sh exec java -jar app.jar",
"FROM jboss/base-jdk:8 ENV APP_VERSION 1.0 RUN wget http://repository.example.com/app/app-USDAPP_VERSION.jar -O app.jar EXPOSE 8080 CMD [ \"java\", \"-jar\", \"app.jar\" ]",
"auths: index.docker.io/v1/: 1 auth: \"YWRfbGzhcGU6R2labnRib21ifTE=\" 2 email: \"[email protected]\" 3 docker.io/my-namespace/my-user/my-image: 4 auth: \"GzhYWRGU6R2fbclabnRgbkSp=\"\" email: \"[email protected]\" docker.io/my-namespace: 5 auth: \"GzhYWRGU6R2deesfrRgbkSp=\"\" email: \"[email protected]\"",
"oc create secret generic dockerhub --from-file=.dockerconfigjson=<path/to/.docker/config.json> --type=kubernetes.io/dockerconfigjson",
"spec: output: to: kind: \"DockerImage\" name: \"private.registry.com/org/private-image:latest\" pushSecret: name: \"dockerhub\"",
"oc set build-secret --push bc/sample-build dockerhub",
"oc secrets link builder dockerhub",
"strategy: sourceStrategy: from: kind: \"DockerImage\" name: \"docker.io/user/private_repository\" pullSecret: name: \"dockerhub\"",
"oc set build-secret --pull bc/sample-build dockerhub",
"oc secrets link builder dockerhub",
"env: - name: FIELDREF_ENV valueFrom: fieldRef: fieldPath: metadata.name",
"apiVersion: build.openshift.io/v1 kind: BuildConfig metadata: name: secret-example-bc spec: strategy: sourceStrategy: env: - name: MYVAL valueFrom: secretKeyRef: key: myval name: mysecret",
"spec: output: to: kind: \"ImageStreamTag\" name: \"sample-image:latest\"",
"spec: output: to: kind: \"DockerImage\" name: \"my-registry.mycompany.com:5000/myimages/myimage:tag\"",
"spec: output: to: kind: \"ImageStreamTag\" name: \"my-image:latest\" imageLabels: - name: \"vendor\" value: \"MyCompany\" - name: \"authoritative-source-url\" value: \"registry.mycompany.com\"",
"strategy: dockerStrategy: from: kind: \"ImageStreamTag\" name: \"debian:latest\"",
"strategy: dockerStrategy: dockerfilePath: dockerfiles/app1/Dockerfile",
"dockerStrategy: env: - name: \"HTTP_PROXY\" value: \"http://myproxy.net:5187/\"",
"dockerStrategy: buildArgs: - name: \"version\" value: \"latest\"",
"strategy: dockerStrategy: imageOptimizationPolicy: SkipLayers",
"spec: dockerStrategy: volumes: - name: secret-mvn 1 mounts: - destinationPath: /opt/app-root/src/.ssh 2 source: type: Secret 3 secret: secretName: my-secret 4 - name: settings-mvn 5 mounts: - destinationPath: /opt/app-root/src/.m2 6 source: type: ConfigMap 7 configMap: name: my-config 8",
"strategy: sourceStrategy: from: kind: \"ImageStreamTag\" name: \"incremental-image:latest\" 1 incremental: true 2",
"strategy: sourceStrategy: from: kind: \"ImageStreamTag\" name: \"builder-image:latest\" scripts: \"http://somehost.com/scripts_directory\" 1",
"sourceStrategy: env: - name: \"DISABLE_ASSET_COMPILATION\" value: \"true\"",
"#!/bin/bash restore build artifacts if [ \"USD(ls /tmp/s2i/artifacts/ 2>/dev/null)\" ]; then mv /tmp/s2i/artifacts/* USDHOME/. fi move the application source mv /tmp/s2i/src USDHOME/src build application artifacts pushd USD{HOME} make all install the artifacts make install popd",
"#!/bin/bash run the application /opt/application/run.sh",
"#!/bin/bash pushd USD{HOME} if [ -d deps ]; then # all deps contents to tar stream tar cf - deps fi popd",
"#!/bin/bash inform the user how to use the image cat <<EOF This is a S2I sample builder image, to use it, install https://github.com/openshift/source-to-image EOF",
"spec: sourceStrategy: volumes: - name: secret-mvn 1 mounts: - destinationPath: /opt/app-root/src/.ssh 2 source: type: Secret 3 secret: secretName: my-secret 4 - name: settings-mvn 5 mounts: - destinationPath: /opt/app-root/src/.m2 6 source: type: ConfigMap 7 configMap: name: my-config 8",
"kind: \"BuildConfig\" apiVersion: \"v1\" metadata: name: \"sample-pipeline\" spec: strategy: jenkinsPipelineStrategy: jenkinsfile: |- node('agent') { stage 'build' openshiftBuild(buildConfig: 'ruby-sample-build', showBuildLogs: 'true') stage 'deploy' openshiftDeploy(deploymentConfig: 'frontend') }",
"kind: \"BuildConfig\" apiVersion: \"v1\" metadata: name: \"sample-pipeline\" spec: source: git: uri: \"https://github.com/openshift/ruby-hello-world\" strategy: jenkinsPipelineStrategy: jenkinsfilePath: some/repo/dir/filename 1",
"jenkinsPipelineStrategy: env: - name: \"FOO\" value: \"BAR\"",
"oc project <project_name>",
"oc new-app jenkins-ephemeral 1",
"kind: \"BuildConfig\" apiVersion: \"v1\" metadata: name: \"nodejs-sample-pipeline\" spec: strategy: jenkinsPipelineStrategy: jenkinsfile: <pipeline content from below> type: JenkinsPipeline",
"def templatePath = 'https://raw.githubusercontent.com/openshift/nodejs-ex/master/openshift/templates/nodejs-mongodb.json' 1 def templateName = 'nodejs-mongodb-example' 2 pipeline { agent { node { label 'nodejs' 3 } } options { timeout(time: 20, unit: 'MINUTES') 4 } stages { stage('preamble') { steps { script { openshift.withCluster() { openshift.withProject() { echo \"Using project: USD{openshift.project()}\" } } } } } stage('cleanup') { steps { script { openshift.withCluster() { openshift.withProject() { openshift.selector(\"all\", [ template : templateName ]).delete() 5 if (openshift.selector(\"secrets\", templateName).exists()) { 6 openshift.selector(\"secrets\", templateName).delete() } } } } } } stage('create') { steps { script { openshift.withCluster() { openshift.withProject() { openshift.newApp(templatePath) 7 } } } } } stage('build') { steps { script { openshift.withCluster() { openshift.withProject() { def builds = openshift.selector(\"bc\", templateName).related('builds') timeout(5) { 8 builds.untilEach(1) { return (it.object().status.phase == \"Complete\") } } } } } } } stage('deploy') { steps { script { openshift.withCluster() { openshift.withProject() { def rm = openshift.selector(\"dc\", templateName).rollout() timeout(5) { 9 openshift.selector(\"dc\", templateName).related('pods').untilEach(1) { return (it.object().status.phase == \"Running\") } } } } } } } stage('tag') { steps { script { openshift.withCluster() { openshift.withProject() { openshift.tag(\"USD{templateName}:latest\", \"USD{templateName}-staging:latest\") 10 } } } } } } }",
"oc create -f nodejs-sample-pipeline.yaml",
"oc create -f https://raw.githubusercontent.com/openshift/origin/master/examples/jenkins/pipeline/nodejs-sample-pipeline.yaml",
"oc start-build nodejs-sample-pipeline",
"oc start-build <buildconfig_name>",
"oc start-build --from-build=<build_name>",
"oc start-build <buildconfig_name> --follow",
"oc start-build <buildconfig_name> --env=<key>=<value>",
"oc start-build hello-world --from-repo=../hello-world --commit=v2",
"oc cancel-build <build_name>",
"oc cancel-build <build1_name> <build2_name> <build3_name>",
"oc cancel-build bc/<buildconfig_name>",
"oc cancel-build bc/<buildconfig_name>",
"oc delete bc <BuildConfigName>",
"oc delete --cascade=false bc <BuildConfigName>",
"oc describe build <build_name>",
"oc describe build <build_name>",
"oc logs -f bc/<buildconfig_name>",
"oc logs --version=<number> bc/<buildconfig_name>",
"sourceStrategy: env: - name: \"BUILD_LOGLEVEL\" value: \"2\" 1",
"type: \"GitHub\" github: secretReference: name: \"mysecret\"",
"- kind: Secret apiVersion: v1 metadata: name: mysecret creationTimestamp: data: WebHookSecretKey: c2VjcmV0dmFsdWUx",
"apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: annotations: rbac.authorization.kubernetes.io/autoupdate: \"true\" name: webhook-access-unauthenticated namespace: <namespace> 1 roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: \"system:webhook\" subjects: - apiGroup: rbac.authorization.k8s.io kind: Group name: \"system:unauthenticated\"",
"oc apply -f add-webhooks-unauth.yaml",
"type: \"GitHub\" github: secretReference: name: \"mysecret\"",
"https://<openshift_api_host:port>/apis/build.openshift.io/v1/namespaces/<namespace>/buildconfigs/<name>/webhooks/<secret>/github",
"oc describe bc/<name_of_your_BuildConfig>",
"https://api.starter-us-east-1.openshift.com:443/apis/build.openshift.io/v1/namespaces/<namespace>/buildconfigs/<name>/webhooks/<secret>/github",
"curl -H \"X-GitHub-Event: push\" -H \"Content-Type: application/json\" -k -X POST --data-binary @payload.json https://<openshift_api_host:port>/apis/build.openshift.io/v1/namespaces/<namespace>/buildconfigs/<name>/webhooks/<secret>/github",
"type: \"GitLab\" gitlab: secretReference: name: \"mysecret\"",
"https://<openshift_api_host:port>/apis/build.openshift.io/v1/namespaces/<namespace>/buildconfigs/<name>/webhooks/<secret>/gitlab",
"oc describe bc <name>",
"curl -H \"X-GitLab-Event: Push Hook\" -H \"Content-Type: application/json\" -k -X POST --data-binary @payload.json https://<openshift_api_host:port>/apis/build.openshift.io/v1/namespaces/<namespace>/buildconfigs/<name>/webhooks/<secret>/gitlab",
"type: \"Bitbucket\" bitbucket: secretReference: name: \"mysecret\"",
"https://<openshift_api_host:port>/apis/build.openshift.io/v1/namespaces/<namespace>/buildconfigs/<name>/webhooks/<secret>/bitbucket",
"oc describe bc <name>",
"curl -H \"X-Event-Key: repo:push\" -H \"Content-Type: application/json\" -k -X POST --data-binary @payload.json https://<openshift_api_host:port>/apis/build.openshift.io/v1/namespaces/<namespace>/buildconfigs/<name>/webhooks/<secret>/bitbucket",
"type: \"Generic\" generic: secretReference: name: \"mysecret\" allowEnv: true 1",
"https://<openshift_api_host:port>/apis/build.openshift.io/v1/namespaces/<namespace>/buildconfigs/<name>/webhooks/<secret>/generic",
"curl -X POST -k https://<openshift_api_host:port>/apis/build.openshift.io/v1/namespaces/<namespace>/buildconfigs/<name>/webhooks/<secret>/generic",
"git: uri: \"<url to git repository>\" ref: \"<optional git reference>\" commit: \"<commit hash identifying a specific git commit>\" author: name: \"<author name>\" email: \"<author e-mail>\" committer: name: \"<committer name>\" email: \"<committer e-mail>\" message: \"<commit message>\" env: 1 - name: \"<variable name>\" value: \"<variable value>\"",
"curl -H \"Content-Type: application/yaml\" --data-binary @payload_file.yaml -X POST -k https://<openshift_api_host:port>/apis/build.openshift.io/v1/namespaces/<namespace>/buildconfigs/<name>/webhooks/<secret>/generic",
"oc describe bc <name>",
"kind: \"ImageStream\" apiVersion: \"v1\" metadata: name: \"ruby-20-centos7\"",
"strategy: sourceStrategy: from: kind: \"ImageStreamTag\" name: \"ruby-20-centos7:latest\"",
"type: \"ImageChange\" 1 imageChange: {} type: \"ImageChange\" 2 imageChange: from: kind: \"ImageStreamTag\" name: \"custom-image:latest\"",
"strategy: sourceStrategy: from: kind: \"DockerImage\" name: \"172.30.17.3:5001/mynamespace/ruby-20-centos7:<immutableid>\"",
"type: \"ImageChange\" imageChange: from: kind: \"ImageStreamTag\" name: \"custom-image:latest\" paused: true",
"apiVersion: build.openshift.io/v1 kind: BuildConfig metadata: name: bc-ict-example namespace: bc-ict-example-namespace spec: triggers: - imageChange: from: kind: ImageStreamTag name: input:latest namespace: bc-ict-example-namespace - imageChange: from: kind: ImageStreamTag name: input2:latest namespace: bc-ict-example-namespace type: ImageChange status: imageChangeTriggers: - from: name: input:latest namespace: bc-ict-example-namespace lastTriggerTime: \"2021-06-30T13:47:53Z\" lastTriggeredImageID: image-registry.openshift-image-registry.svc:5000/bc-ict-example-namespace/input@sha256:0f88ffbeb9d25525720bfa3524cb1bf0908b7f791057cf1acfae917b11266a69 - from: name: input2:latest namespace: bc-ict-example-namespace lastTriggeredImageID: image-registry.openshift-image-registry.svc:5000/bc-ict-example-namespace/input2@sha256:0f88ffbeb9d25525720bfa3524cb2ce0908b7f791057cf1acfae917b11266a69 lastVersion: 1",
"Then you use the `name` and `namespace` from that build to find the corresponding image change trigger in `buildConfig.spec.triggers`.",
"type: \"ConfigChange\"",
"oc set triggers bc <name> --from-github",
"oc set triggers bc <name> --from-image='<image>'",
"oc set triggers bc <name> --from-bitbucket --remove",
"oc set triggers --help",
"postCommit: script: \"bundle exec rake test --verbose\"",
"postCommit: command: [\"/bin/bash\", \"-c\", \"bundle exec rake test --verbose\"]",
"postCommit: command: [\"bundle\", \"exec\", \"rake\", \"test\"] args: [\"--verbose\"]",
"oc set build-hook bc/mybc --post-commit --command -- bundle exec rake test --verbose",
"oc set build-hook bc/mybc --post-commit --script=\"bundle exec rake test --verbose\"",
"apiVersion: \"v1\" kind: \"BuildConfig\" metadata: name: \"sample-build\" spec: resources: limits: cpu: \"100m\" 1 memory: \"256Mi\" 2",
"resources: requests: 1 cpu: \"100m\" memory: \"256Mi\"",
"spec: completionDeadlineSeconds: 1800",
"apiVersion: \"v1\" kind: \"BuildConfig\" metadata: name: \"sample-build\" spec: nodeSelector: 1 key1: value1 key2: value2",
"apiVersion: \"v1\" kind: \"BuildConfig\" metadata: name: \"sample-build\" spec: successfulBuildsHistoryLimit: 2 1 failedBuildsHistoryLimit: 2 2",
"oc tag --source=docker registry.redhat.io/ubi9/ubi:latest ubi:latest",
"apiVersion: image.openshift.io/v1 kind: ImageStream metadata: name: ubi9 spec: tags: - from: kind: DockerImage name: registry.redhat.io/ubi9/ubi:latest name: latest referencePolicy: type: Source",
"cat << EOF > secret-template.txt kind: Secret apiVersion: v1 metadata: name: etc-pki-entitlement type: Opaque data: {{ range \\USDkey, \\USDvalue := .data }} {{ \\USDkey }}: {{ \\USDvalue }} {{ end }} EOF oc get secret etc-pki-entitlement -n openshift-config-managed -o=go-template-file --template=secret-template.txt | oc apply -f -",
"strategy: dockerStrategy: from: kind: ImageStreamTag name: ubi9:latest volumes: - name: etc-pki-entitlement mounts: - destinationPath: /etc/pki/entitlement source: type: Secret secret: secretName: etc-pki-entitlement",
"FROM registry.redhat.io/ubi9/ubi:latest RUN rm -rf /etc/rhsm-host 1 RUN yum --enablerepo=codeready-builder-for-rhel-9-x86_64-rpms install \\ 2 nss_wrapper uid_wrapper -y && yum clean all -y RUN ln -s /run/secrets/rhsm /etc/rhsm-host 3",
"[test-<name>] name=test-<number> baseurl = https://satellite.../content/dist/rhel/server/7/7Server/x86_64/os enabled=1 gpgcheck=0 sslverify=0 sslclientkey = /etc/pki/entitlement/...-key.pem sslclientcert = /etc/pki/entitlement/....pem",
"oc create configmap yum-repos-d --from-file /path/to/satellite.repo",
"strategy: dockerStrategy: from: kind: ImageStreamTag name: ubi9:latest volumes: - name: yum-repos-d mounts: - destinationPath: /etc/yum.repos.d source: type: ConfigMap configMap: name: yum-repos-d - name: etc-pki-entitlement mounts: - destinationPath: /etc/pki/entitlement source: type: Secret secret: secretName: etc-pki-entitlement",
"FROM registry.redhat.io/ubi9/ubi:latest RUN rm -rf /etc/rhsm-host 1 RUN yum --enablerepo=codeready-builder-for-rhel-9-x86_64-rpms install \\ 2 nss_wrapper uid_wrapper -y && yum clean all -y RUN ln -s /run/secrets/rhsm /etc/rhsm-host 3",
"requested access to the resource is denied",
"oc describe quota",
"secret/ssl-key references serviceUID 62ad25ca-d703-11e6-9d6f-0e9c0057b608, which does not match 77b6dd80-d716-11e6-9d6f-0e9c0057b60",
"oc delete secret <secret_name>",
"oc annotate service <service_name> service.beta.openshift.io/serving-cert-generation-error-",
"oc annotate service <service_name> service.beta.openshift.io/serving-cert-generation-error-num-"
] | https://docs.redhat.com/en/documentation/red_hat_openshift_service_on_aws/4/html-single/builds_using_buildconfig/index |
Chapter 10. Troubleshooting | Chapter 10. Troubleshooting The OpenTelemetry Collector offers multiple ways to measure its health as well as investigate data ingestion issues. 10.1. Collecting diagnostic data from the command line When submitting a support case, it is helpful to include diagnostic information about your cluster to Red Hat Support. You can use the oc adm must-gather tool to gather diagnostic data for resources of various types, such as OpenTelemetryCollector , Instrumentation , and the created resources like Deployment , Pod , or ConfigMap . The oc adm must-gather tool creates a new pod that collects this data. Procedure From the directory where you want to save the collected data, run the oc adm must-gather command to collect the data: USD oc adm must-gather --image=ghcr.io/open-telemetry/opentelemetry-operator/must-gather -- \ /usr/bin/must-gather --operator-namespace <operator_namespace> 1 1 The default namespace where the Operator is installed is openshift-opentelemetry-operator . Verification Verify that the new directory is created and contains the collected data. 10.2. Getting the OpenTelemetry Collector logs You can get the logs for the OpenTelemetry Collector as follows. Procedure Set the relevant log level in the OpenTelemetryCollector custom resource (CR): config: service: telemetry: logs: level: debug 1 1 Collector's log level. Supported values include info , warn , error , or debug . Defaults to info . Use the oc logs command or the web console to retrieve the logs. 10.3. Exposing the metrics The OpenTelemetry Collector exposes the metrics about the data volumes it has processed. The following metrics are for spans, although similar metrics are exposed for metrics and logs signals: otelcol_receiver_accepted_spans The number of spans successfully pushed into the pipeline. otelcol_receiver_refused_spans The number of spans that could not be pushed into the pipeline. otelcol_exporter_sent_spans The number of spans successfully sent to the destination. otelcol_exporter_enqueue_failed_spans The number of spans failed to be added to the sending queue. The Operator creates a <cr_name>-collector-monitoring telemetry service that you can use to scrape the metrics endpoint. Procedure Enable the telemetry service by adding the following lines in the OpenTelemetryCollector custom resource (CR): # ... config: service: telemetry: metrics: address: ":8888" 1 # ... 1 The address at which the internal collector metrics are exposed. Defaults to :8888 . Retrieve the metrics by running the following command, which uses the port-forwarding Collector pod: USD oc port-forward <collector_pod> In the OpenTelemetryCollector CR, set the enableMetrics field to true to scrape internal metrics: apiVersion: opentelemetry.io/v1beta1 kind: OpenTelemetryCollector spec: # ... mode: deployment observability: metrics: enableMetrics: true # ... Depending on the deployment mode of the OpenTelemetry Collector, the internal metrics are scraped by using PodMonitors or ServiceMonitors . Note Alternatively, if you do not set the enableMetrics field to true , you can access the metrics endpoint at http://localhost:8888/metrics . On the Observe page in the web console, enable User Workload Monitoring to visualize the scraped metrics. Note Not all processors expose the required metrics. In the web console, go to Observe Dashboards and select the OpenTelemetry Collector dashboard from the drop-down list to view it. Tip You can filter the visualized data such as spans or metrics by the Collector instance, namespace, or OpenTelemetry components such as processors, receivers, or exporters. 10.4. Debug Exporter You can configure the Debug Exporter to export the collected data to the standard output. Procedure Configure the OpenTelemetryCollector custom resource as follows: config: exporters: debug: verbosity: detailed service: pipelines: traces: exporters: [debug] metrics: exporters: [debug] logs: exporters: [debug] Use the oc logs command or the web console to export the logs to the standard output. 10.5. Using the Network Observability Operator for troubleshooting You can debug the traffic between your observability components by visualizing it with the Network Observability Operator. Prerequisites You have installed the Network Observability Operator as explained in "Installing the Network Observability Operator". Procedure In the OpenShift Container Platform web console, go to Observe Network Traffic Topology . Select Namespace to filter the workloads by the namespace in which your OpenTelemetry Collector is deployed. Use the network traffic visuals to troubleshoot possible issues. See "Observing the network traffic from the Topology view" for more details. Additional resources Installing the Network Observability Operator Observing the network traffic from the Topology view 10.6. Troubleshooting the instrumentation To troubleshoot the instrumentation, look for any of the following issues: Issues with instrumentation injection into your workload Issues with data generation by the instrumentation libraries 10.6.1. Troubleshooting instrumentation injection into your workload To troubleshoot instrumentation injection, you can perform the following activities: Checking if the Instrumentation object was created Checking if the init-container started Checking if the resources were deployed in the correct order Searching for errors in the Operator logs Double-checking the pod annotations Procedure Run the following command to verify that the Instrumentation object was successfully created: USD oc get instrumentation -n <workload_project> 1 1 The namespace where the instrumentation was created. Run the following command to verify that the opentelemetry-auto-instrumentation init-container successfully started, which is a prerequisite for instrumentation injection into workloads: USD oc get events -n <workload_project> 1 1 The namespace where the instrumentation is injected for workloads. Example output ... Created container opentelemetry-auto-instrumentation ... Started container opentelemetry-auto-instrumentation Verify that the resources were deployed in the correct order for the auto-instrumentation to work correctly. The correct order is to deploy the Instrumentation custom resource (CR) before the application. For information about the Instrumentation CR, see the section "Configuring the instrumentation". Note When the pod starts, the Red Hat build of OpenTelemetry Operator checks the Instrumentation CR for annotations containing instructions for injecting auto-instrumentation. Generally, the Operator then adds an init-container to the application's pod that injects the auto-instrumentation and environment variables into the application's container. If the Instrumentation CR is not available to the Operator when the application is deployed, the Operator is unable to inject the auto-instrumentation. Fixing the order of deployment requires the following steps: Update the instrumentation settings. Delete the instrumentation object. Redeploy the application. Run the following command to inspect the Operator logs for instrumentation errors: USD oc logs -l app.kubernetes.io/name=opentelemetry-operator --container manager -n openshift-opentelemetry-operator --follow Troubleshoot pod annotations for the instrumentations for a specific programming language. See the required annotation fields and values in "Configuring the instrumentation". Verify that the application pods that you are instrumenting are labeled with correct annotations and the appropriate auto-instrumentation settings have been applied. Example Example command to get pod annotations for an instrumented Python application USD oc get pods -n <workload_project> -o jsonpath='{range .items[?(@.metadata.annotations["instrumentation.opentelemetry.io/inject-python"]=="true")]}{.metadata.name}{"\n"}{end}' Verify that the annotation applied to the instrumentation object is correct for the programming language that you are instrumenting. If there are multiple instrumentations in the same namespace, specify the name of the Instrumentation object in their annotations. Example If the Instrumentation object is in a different namespace, specify the namespace in the annotation. Example Verify that the OpenTelemetryCollector custom resource specifies the auto-instrumentation annotations under spec.template.metadata.annotations . If the auto-instrumentation annotations are in spec.metadata.annotations instead, move them into spec.template.metadata.annotations . 10.6.2. Troubleshooting telemetry data generation by the instrumentation libraries You can troubleshoot telemetry data generation by the instrumentation libraries by checking the endpoint, looking for errors in your application logs, and verifying that the Collector is receiving the telemetry data. Procedure Verify that the instrumentation is transmitting data to the correct endpoint: USD oc get instrumentation <instrumentation_name> -n <workload_project> -o jsonpath='{.spec.endpoint}' The default endpoint http://localhost:4317 for the Instrumentation object is only applicable to a Collector instance that is deployed as a sidecar in your application pod. If you are using an incorrect endpoint, correct it by editing the Instrumentation object and redeploying your application. Inspect your application logs for error messages that might indicate that the instrumentation is malfunctioning: USD oc logs <application_pod> -n <workload_project> If the application logs contain error messages that indicate that the instrumentation might be malfunctioning, install the OpenTelemetry SDK and libraries locally. Then run your application locally and troubleshoot for issues between the instrumentation libraries and your application without OpenShift Container Platform. Use the Debug Exporter to verify that the telemetry data is reaching the destination OpenTelemetry Collector instance. For more information, see "Debug Exporter". | [
"oc adm must-gather --image=ghcr.io/open-telemetry/opentelemetry-operator/must-gather -- /usr/bin/must-gather --operator-namespace <operator_namespace> 1",
"config: service: telemetry: logs: level: debug 1",
"config: service: telemetry: metrics: address: \":8888\" 1",
"oc port-forward <collector_pod>",
"apiVersion: opentelemetry.io/v1beta1 kind: OpenTelemetryCollector spec: mode: deployment observability: metrics: enableMetrics: true",
"config: exporters: debug: verbosity: detailed service: pipelines: traces: exporters: [debug] metrics: exporters: [debug] logs: exporters: [debug]",
"oc get instrumentation -n <workload_project> 1",
"oc get events -n <workload_project> 1",
"... Created container opentelemetry-auto-instrumentation ... Started container opentelemetry-auto-instrumentation",
"oc logs -l app.kubernetes.io/name=opentelemetry-operator --container manager -n openshift-opentelemetry-operator --follow",
"instrumentation.opentelemetry.io/inject-python=\"true\"",
"oc get pods -n <workload_project> -o jsonpath='{range .items[?(@.metadata.annotations[\"instrumentation.opentelemetry.io/inject-python\"]==\"true\")]}{.metadata.name}{\"\\n\"}{end}'",
"instrumentation.opentelemetry.io/inject-nodejs: \"<instrumentation_object>\"",
"instrumentation.opentelemetry.io/inject-nodejs: \"<other_namespace>/<instrumentation_object>\"",
"oc get instrumentation <instrumentation_name> -n <workload_project> -o jsonpath='{.spec.endpoint}'",
"oc logs <application_pod> -n <workload_project>"
] | https://docs.redhat.com/en/documentation/openshift_container_platform/4.15/html/red_hat_build_of_opentelemetry/otel-troubleshoot |
Monitoring and logging | Monitoring and logging Red Hat Developer Hub 1.4 Tracking performance and capturing insights with monitoring and logging tools in Red Hat Developer Hub Red Hat Customer Content Services | [
"apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: name: <developer_hub_service_monitor_name> 1 namespace: <rhdh_namespace_name> 2 labels: app.kubernetes.io/instance: <rhdh_cr_name> 3 app.kubernetes.io/name: Backstage spec: namespaceSelector: matchNames: - <rhdh_namespace_name> 4 selector: matchLabels: app.kubernetes.io/instance: <deployment_name> 5 app.kubernetes.io/name: <rhdh_cr_type> 6 endpoints: - port: http-metrics path: '/metrics'",
"apply -f <filename>",
"upstream: metrics: serviceMonitor: enabled: true path: /metrics port: http-metrics",
"upstream: backstage: # --- TRUNCATED --- podAnnotations: prometheus.io/scrape: 'true' prometheus.io/path: '/metrics' prometheus.io/port: '9464' prometheus.io/scheme: 'http'",
"Update OPERATOR_NS accordingly OPERATOR_NS=rhdh-operator edit configmap backstage-default-config -n \"USD{OPERATOR_NS}\"",
"deployment.yaml: |- apiVersion: apps/v1 kind: Deployment # --- truncated --- spec: template: # --- truncated --- metadata: labels: rhdh.redhat.com/app: # placeholder for 'backstage-<cr-name>' # --- truncated --- annotations: prometheus.io/scrape: 'true' prometheus.io/path: '/metrics' prometheus.io/port: '9464' prometheus.io/scheme: 'http' # --- truncated ---",
"--namespace=prometheus port-forward deploy/prometheus-server 9090",
"upstream: backstage: # --- Truncated --- extraEnvVars: - name: LOG_LEVEL value: debug",
"spec: # Other fields omitted application: extraEnvs: envs: - name: LOG_LEVEL value: debug",
"fields @timestamp, @message, kubernetes.container_name | filter kubernetes.container_name in [\"install-dynamic-plugins\", \"backstage-backend\"]",
"az aks create/update --resource-group <your-ResourceGroup> --name <your-Cluster> --enable-azure-monitor-metrics",
"upstream: backstage: # --- TRUNCATED --- podAnnotations: prometheus.io/scrape: 'true' prometheus.io/path: '/metrics' prometheus.io/port: '9464' prometheus.io/scheme: 'http'",
"Update OPERATOR_NS accordingly OPERATOR_NS=rhdh-operator edit configmap backstage-default-config -n \"USD{OPERATOR_NS}\"",
"deployment.yaml: |- apiVersion: apps/v1 kind: Deployment # --- truncated --- spec: template: # --- truncated --- metadata: labels: rhdh.redhat.com/app: # placeholder for 'backstage-<cr-name>' # --- truncated --- annotations: prometheus.io/scrape: 'true' prometheus.io/path: '/metrics' prometheus.io/port: '9464' prometheus.io/scheme: 'http' # --- truncated ---"
] | https://docs.redhat.com/en/documentation/red_hat_developer_hub/1.4/html-single/monitoring_and_logging/index |
Chapter 12. Configure the logical network for gluster traffic | Chapter 12. Configure the logical network for gluster traffic For creating a separate gluster logical network, in Red Hat Hyperconverged Infrastructure for Virtualization (RHHI for Virtualization) 1.7 users had to perform the steps manually via the Red Hat Virtualization Administration portal. From RHHI for Virtualization 1.8 this process can be automated using the ansible playbook as follows: 12.1. Defining the logical network details for gluster traffic Prerequisites Red Hat Hyperconverged Infrastructure for Virtualization deployment is complete with hosts in up state. Procedure Log in to the first hyperconverged host. Change into the hc-ansible-deployment directory: Make a copy of the gluster_network_inventory.yml file for future reference. Define your configuration in the gluster_network_inventory.yml file. Use the example gluster_network_inventory.yml file to define the details on each host. A complete outline of this file is available in Understanding the gluster_network_inventory.yml file . Encrypt the gluster_network_inventory.yml file and specify a password using ansible-vault . The required variables in gluster_network_inventory.yml include password values, so it is important to encrypt the file to protect the password values. Enter and confirm a new vault password when prompted. 12.2. Executing the gluster network playbook Prerequisites Define configuration in the gluster_network_inventory.yml playbook: Section 12.1, "Defining the logical network details for gluster traffic" . Procedure Log in to the first hyperconverged host. Change into the hc-ansible-deployment directory. Run the following command as the root user to start the configuration process. Enter the vault password for this file when prompted to start network configuration. 12.3. Verifying the logical network for gluster traffic Check the following to verify if the logical network for gluster traffic is successfully created and attached to the host. Validate the availability of gluster logical network. Log in to the Administration Portal. Click Network Networks . This should list the newly created gluster_net network. Click on gluster_net click on Clusters tab, hovering the mouse over Network Role column should display Migration Gluster . Validate gluster_net is attached to the storage network interface of all the hosts. Click on Compute Hosts click on any host. Select Network Interfaces tab click on the drop down button near the label Logical Networks corresponding to storage or backend network, you should see the gluster_net as the network name. 12.4. (Optional) Editing the logical network for Jumbo frames If you have Jumbo frames (MTU 9000) enabled, you need to edit the default network configuration to ensure jumbo frames are used for storage traffic. The network components (switch) must support Jumbo frames. The following is the procedure to edit the logical network for Jumbo frames on the storage network, gluster_net here: Prerequisites Logical network for gluster traffic is successfully created and is attached to the host. Procedure Login in to the Administration Portal. Click Networks Network . Click on gluster_net Edit . Select custom MTU and make it as 9000. Click OK . Note Make sure all the network components are enabled with the same MTU. | [
"cd /etc/ansible/roles/gluster.ansible/playbooks/hc-ansible-deployment",
"cp gluster_network_inventory.yml gluster_network_inventory.yml.backup",
"ansible-vault encrypt gluster_network_inventory.yml",
"cd /etc/ansible/roles/gluster.ansible/playbooks/hc-ansible-deployment",
"ansible-playbook -i gluster_network_inventory.yml tasks/gluster_network_setup.yml --ask-vault-pass"
] | https://docs.redhat.com/en/documentation/red_hat_hyperconverged_infrastructure_for_virtualization/1.8/html/deploying_red_hat_hyperconverged_infrastructure_for_virtualization/configure-gluster-rhv-manager |
Appendix D. Configuration Recommendations | Appendix D. Configuration Recommendations D.1. Timeout Values Table D.1. Timeout Value Recommendations for JBoss Data Grid Timeout Value Parent Element Default Value Recommended Value distributedSyncTimeout transport 240,000 (4 minutes) Same as default lockAcquisitionTimeout locking 10,000 (10 seconds) Same as default cacheStopTimeout transaction 30,000 (30 seconds) Same as default completedTxTimeout transaction 60,000 (60 seconds) Same as default replTimeout sync 15,000 (15 seconds) Same as default timeout stateTransfer 240,000 (4 minutes) Same as default timeout backup 10,000 (10 seconds) Same as default flushLockTimeout async 1 (1 millisecond) Same as default. Note that this value applies to asynchronous cache stores, but not asynchronous caches. shutdownTimeout async 25,000 (25 seconds) Same as default. Note that this value applies to asynchronous cache stores, but not asynchronous caches. pushStateTimeout singletonStore 10,000 (10 seconds) Same as default. backup replicationTimeout 10,000 (10 seconds) remoteCallTimeout clusterLoader 0 For most requirements, same as default. This value is usually set to the same as the sync.replTimeout value. Report a bug | null | https://docs.redhat.com/en/documentation/red_hat_data_grid/6.6/html/administration_and_configuration_guide/appe-configuration_recommendations |
8.107. luci | 8.107. luci 8.107.1. RHSA-2013:1603 - Moderate: luci security, bug fix, and enhancement update Updated luci packages that fix two security issues, several bugs, and add two enhancements are now available for Red Hat Enterprise Linux 6. The Red Hat Security Response Team has rated this update as having moderate security impact. Common Vulnerability Scoring System (CVSS) base scores, which give detailed severity ratings, are available for each vulnerability from the CVE links associated with each description below. Luci is a web-based high availability administration application. Security Fixes CVE-2013-4482 A flaw was found in the way the luci service was initialized. If a system administrator started the luci service from a directory that was writable to by a local user, that user could use this flaw to execute arbitrary code as the root or luci user. CVE-2013-4481 A flaw was found in the way luci generated its configuration file. The file was created as world readable for a short period of time, allowing a local user to gain access to the authentication secrets stored in the configuration file. These issues were discovered by Jan Pokorny of Red Hat. Bug Fixes BZ# 917747 Previously, luci did not reflect concurrent additions to fence devices coverage as happened in the fence-agents package. Consequently, Dell iDRAC (idrac), HP iLO2 (ilo2), HP iLO3 (ilo3), HP iLO4 (ilo4), and IBM Integrated Management Module (imm) devices or agents were not honored in luci, leading to the inability to properly work with or to setup a cluster comprising of these devices. This update restores the capability of luci to work with a full intended set of fence devices. BZ#956360 Previously, luci did not run in FIPS mode because it utilized components that were not compliant with FIPS. Both components, the python-breaker library and the python-weberror error handler have been modified to comply with FIPS so that luci now works in FIPS mode as expected. BZ#978479 Due to a bug in the luci code, a data race condition could occur while adding multiple nodes into a cluster with a single request. As a consequence, nodes could have been provided configurations with varying version numbers, leaving the cluster in an unexpected state. The respective luci code has been fixed so this data race cannot be triggered anymore. Multiple nodes can now be added to a cluster at once without a risk of negative consequences. BZ#773491 implementation of dynamic pop-up messages had a high probability of messages leaving the screen unnoticed under certain circumstances. Therefore, the respective luci code has been modified to adjust dynamic pop-ups to appear as static messages, which significantly decreases a chance that the message might be unnoticed. BZ# 883008 Previously, luci did not reflect concurrent additions to parameters for some fence devices (including "cmd_prompt", "login_timeout", "power_timeout", "retry_on", "shell_timeout") or respective instances ("delay") as happened in the fence-agents package. Consequently, the valid parameters could be dropped from the respective part of the configuration upon submitting the dedicated forms in luci. This update restores the capability of luci to work with a full intended set of fence agents parameters and, in turn, prevents luci from unexpectedly discarding the already configured parameters. BZ#896244 Due to a bug in the cluster.conf(5) man page, luci expected the default value for the syslog_facility option in the cluster logging configuration to be "daemon" instead of the actual default value "local4". Consequently, all logging configuration items without "syslog_facility" explicitly set were thus marked as having "Syslog Message Facility" of "daemon" in luci. This could result in no cluster messages being logged into the custom log file for the rules containing "daemon.*". With this update, luci correctly recognizes "local4" as the default syslog message facility and logging configuration items in luci are marked accordingly by default. The user is now able to effectively set the syslog facility of the logging configuration item to be "daemon". In such a case, cluster messages are logged into log files containing the "daemon.*" rules as expected. BZ# 886517 The luci application did not automatically enable the ricci and modclusterd services upon creating a new cluster or adding a node to the existing cluster. Therefore, an administrator's intervention was necessary because these services are essential for managing the cluster during its life-cycle. Without these services, luci sustained the contact with cluster nodes, preventing the cluster from rebooting. With this update, luci has been modified to enable the ricci and modclusterd services on every cluster's node when creating a new cluster or adding a node to the existing cluster. The administrator's intervention is no longer needed in the aforementioned scenario. BZ# 878149 Previously, if no cluster node could have been contacted on certain luci pages, luci displayed the Error 500 message on that page and logged an error message with a traceback into its log. As an appropriate response to this situation, this update modifies luci to display one of the following messages: BZ# 880363 Due to a bug in luci validation code, a confusing validation error message was displayed if a non-existing failover domain in the "Failover Domains" tab was specified. This bug has been fixed and luci now processes these validation errors correctly, displaying appropriate error messages as expected. BZ#878960 The "User preferences" page was accessible without authentication, which allowed an anonymous user disabling or enabling "expert" mode. Although this behavior had no direct security impact, consistency in assigned authorization is considered to be best practice. This update modifies luci to strictly require users to be authenticated before accessing this "Preferences" page. BZ# 886576 The "Remove this instance" button in the "Edit Fence Instance" form had no function and could have misled cluster administrators. This button has been removed so the aforementioned form now shows only the relevant content. BZ# 1001835 The luci application incorrectly considered the "module_name" parameter of the Dell DRAC 5 fence device as mandatory. Therefore, such a fence device could not have been created without specifying its module name. The validation code has been fixed so luci now treats this parameter as optional, and Dell DRAC 5 fence devices can now be successfully created without module names. Enhancements BZ#917814 A confirmation pop-up dialog has been added that prevents luci from removing selected clusters accidentally. BZ#983693 The luci application now reflects the concurrent extension to the oracledb, orainstance, and oralistener resource agents regarding Oracle Database 11g support. This also includes the ability to configure the newly supported TNS_ADMIN variable to allow for wider customization. All luci users are advised to upgrade to these updated packages, which contain backported patches to correct these issues and add these enhancements. After installing this update, the luci service will be restarted automatically. | [
"Unable to contact any of the nodes in this cluster.",
"No nodes from this cluster could be contacted. The status of this cluster is unknown"
] | https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/6.5_technical_notes/luci |
Chapter 2. Distributed tracing architecture | Chapter 2. Distributed tracing architecture 2.1. Distributed tracing architecture Every time a user takes an action in an application, a request is executed by the architecture that may require dozens of different services to participate to produce a response. Red Hat OpenShift distributed tracing platform lets you perform distributed tracing, which records the path of a request through various microservices that make up an application. Distributed tracing is a technique that is used to tie the information about different units of work together - usually executed in different processes or hosts - to understand a whole chain of events in a distributed transaction. Developers can visualize call flows in large microservice architectures with distributed tracing. It is valuable for understanding serialization, parallelism, and sources of latency. Red Hat OpenShift distributed tracing platform records the execution of individual requests across the whole stack of microservices, and presents them as traces. A trace is a data/execution path through the system. An end-to-end trace is comprised of one or more spans. A span represents a logical unit of work in Red Hat OpenShift distributed tracing platform that has an operation name, the start time of the operation, and the duration, as well as potentially tags and logs. Spans may be nested and ordered to model causal relationships. 2.1.1. Distributed tracing overview As a service owner, you can use distributed tracing to instrument your services to gather insights into your service architecture. You can use the Red Hat OpenShift distributed tracing platform for monitoring, network profiling, and troubleshooting the interaction between components in modern, cloud-native, microservices-based applications. With the distributed tracing platform, you can perform the following functions: Monitor distributed transactions Optimize performance and latency Perform root cause analysis 2.1.2. Red Hat OpenShift distributed tracing platform features Red Hat OpenShift distributed tracing platform provides the following capabilities: Integration with Kiali - When properly configured, you can view distributed tracing platform data from the Kiali console. High scalability - The distributed tracing platform back end is designed to have no single points of failure and to scale with the business needs. Distributed Context Propagation - Enables you to connect data from different components together to create a complete end-to-end trace. Backwards compatibility with Zipkin - Red Hat OpenShift distributed tracing platform has APIs that enable it to be used as a drop-in replacement for Zipkin, but Red Hat is not supporting Zipkin compatibility in this release. 2.1.3. Red Hat OpenShift distributed tracing platform architecture Red Hat OpenShift distributed tracing platform is made up of several components that work together to collect, store, and display tracing data. Red Hat OpenShift distributed tracing platform (Tempo) - This component is based on the open source Grafana Tempo project . Gateway - The Gateway handles authentication, authorization, and forwarding requests to the Distributor or Query front-end service. Distributor - The Distributor accepts spans in multiple formats including Jaeger, OpenTelemetry, and Zipkin. It routes spans to Ingesters by hashing the traceID and using a distributed consistent hash ring. Ingester - The Ingester batches a trace into blocks, creates bloom filters and indexes, and then flushes it all to the back end. Query Frontend - The Query Frontend is responsible for sharding the search space for an incoming query. The search query is then sent to the Queriers. The Query Frontend deployment exposes the Jaeger UI through the Tempo Query sidecar. Querier - The Querier is responsible for finding the requested trace ID in either the Ingesters or the back-end storage. Depending on parameters, it can query the Ingesters and pull Bloom indexes from the back end to search blocks in object storage. Compactor - The Compactors stream blocks to and from the back-end storage to reduce the total number of blocks. Red Hat build of OpenTelemetry - This component is based on the open source OpenTelemetry project . OpenTelemetry Collector - The OpenTelemetry Collector is a vendor-agnostic way to receive, process, and export telemetry data. The OpenTelemetry Collector supports open-source observability data formats, for example, Jaeger and Prometheus, sending to one or more open-source or commercial back-ends. The Collector is the default location instrumentation libraries export their telemetry data. Red Hat OpenShift distributed tracing platform (Jaeger) - This component is based on the open source Jaeger project . Important The Red Hat OpenShift distributed tracing platform (Jaeger) is a deprecated feature. Deprecated functionality is still included in OpenShift Container Platform and continues to be supported; however, it will be removed in a future release of this product and is not recommended for new deployments. The Red Hat OpenShift distributed tracing platform Operator (Jaeger) will be removed from the redhat-operators catalog in a future release. For more information, see the Red Hat Knowledgebase solution Jaeger Deprecation and Removal in OpenShift . Users must migrate to the Tempo Operator and the Red Hat build of OpenTelemetry for distributed tracing collection and storage. For the most recent list of major functionality that has been deprecated or removed within OpenShift Container Platform, refer to the Deprecated and removed features section of the OpenShift Container Platform release notes. Client (Jaeger client, Tracer, Reporter, instrumented application, client libraries)- The distributed tracing platform (Jaeger) clients are language-specific implementations of the OpenTracing API. They might be used to instrument applications for distributed tracing either manually or with a variety of existing open source frameworks, such as Camel (Fuse), Spring Boot (RHOAR), MicroProfile (RHOAR/Thorntail), Wildfly (EAP), and many more, that are already integrated with OpenTracing. Agent (Jaeger agent, Server Queue, Processor Workers) - The distributed tracing platform (Jaeger) agent is a network daemon that listens for spans sent over User Datagram Protocol (UDP), which it batches and sends to the Collector. The agent is meant to be placed on the same host as the instrumented application. This is typically accomplished by having a sidecar in container environments such as Kubernetes. Jaeger Collector (Collector, Queue, Workers) - Similar to the Jaeger agent, the Jaeger Collector receives spans and places them in an internal queue for processing. This allows the Jaeger Collector to return immediately to the client/agent instead of waiting for the span to make its way to the storage. Storage (Data Store) - Collectors require a persistent storage backend. Red Hat OpenShift distributed tracing platform (Jaeger) has a pluggable mechanism for span storage. Red Hat OpenShift distributed tracing platform (Jaeger) supports the Elasticsearch storage. Query (Query Service) - Query is a service that retrieves traces from storage. Ingester (Ingester Service) - Red Hat OpenShift distributed tracing platform can use Apache Kafka as a buffer between the Collector and the actual Elasticsearch backing storage. Ingester is a service that reads data from Kafka and writes to the Elasticsearch storage backend. Jaeger Console - With the Red Hat OpenShift distributed tracing platform (Jaeger) user interface, you can visualize your distributed tracing data. On the Search page, you can find traces and explore details of the spans that make up an individual trace. 2.1.4. Additional resources Red Hat build of OpenTelemetry | null | https://docs.redhat.com/en/documentation/openshift_container_platform/4.16/html/distributed_tracing/distributed-tracing-architecture |
1.2.2. Defining Assessment and Testing | 1.2.2. Defining Assessment and Testing Vulnerability assessments may be broken down into one of two types: outside looking in and inside looking around . When performing an outside-looking-in vulnerability assessment, you are attempting to compromise your systems from the outside. Being external to your company provides you with the attacker's viewpoint. You see what an attacker sees - publicly-routable IP addresses, systems on your DMZ , external interfaces of your firewall, and more. DMZ stands for "demilitarized zone", which corresponds to a computer or small subnetwork that sits between a trusted internal network, such as a corporate private LAN, and an untrusted external network, such as the public Internet. Typically, the DMZ contains devices accessible to Internet traffic, such as Web (HTTP) servers, FTP servers, SMTP (e-mail) servers and DNS servers. When you perform an inside-looking-around vulnerability assessment, you are at an advantage since you are internal and your status is elevated to trusted. This is the viewpoint you and your co-workers have once logged on to your systems. You see print servers, file servers, databases, and other resources. There are striking distinctions between the two types of vulnerability assessments. Being internal to your company gives you more privileges than an outsider. In most organizations, security is configured to keep intruders out. Very little is done to secure the internals of the organization (such as departmental firewalls, user-level access controls, and authentication procedures for internal resources). Typically, there are many more resources when looking around inside as most systems are internal to a company. Once you are outside the company, your status is untrusted. The systems and resources available to you externally are usually very limited. Consider the difference between vulnerability assessments and penetration tests . Think of a vulnerability assessment as the first step to a penetration test. The information gleaned from the assessment is used for testing. Whereas the assessment is undertaken to check for holes and potential vulnerabilities, the penetration testing actually attempts to exploit the findings. Assessing network infrastructure is a dynamic process. Security, both information and physical, is dynamic. Performing an assessment on the system shows an overview, which can turn up false positives and false negatives. A false positive is a result, where the tool finds vulnerabilities which in reality do not exist. A false negative is when it omits actual vulnerabilities. Security administrators are only as good as the tools they use and the knowledge they retain. Take any of the assessment tools currently available, run them against your system, and it is almost a guarantee that there are some false positives. Whether by program fault or user error, the result is the same. The tool may find false positives, or, even worse, false negatives. Now that the difference between a vulnerability assessment and a penetration test is defined, take the findings of the assessment and review them carefully before conducting a penetration test as part of your new best practices approach. Warning Do not attempt to exploit vulnerabilities on production systems. Doing so can have adverse effects on productivity and efficiency of your systems and network. The following list examines some of the benefits to performing vulnerability assessments. Creates proactive focus on information security. Finds potential exploits before attackers find them. Results in systems being kept up to date and patched. Promotes growth and aids in developing staff expertise. Abates financial loss and negative publicity. 1.2.2.1. Establishing a Methodology To aid in the selection of tools for a vulnerability assessment, it is helpful to establish a vulnerability assessment methodology. Unfortunately, there is no predefined or industry approved methodology at this time; however, common sense and best practices can act as a sufficient guide. What is the target? Are we looking at one server, or are we looking at our entire network and everything within the network? Are we external or internal to the company? The answers to these questions are important as they help determine not only which tools to select but also the manner in which they are used. To learn more about establishing methodologies, see the following websites: http://www.owasp.org/ The Open Web Application Security Project | null | https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/security_guide/sect-security_guide-vulnerability_assessment-defining_assessment_and_testing |
Part II. Advanced Configuration | Part II. Advanced Configuration Contains cookbook-style scenarios for advanced Red Hat OpenStack Platform Networking features. | null | https://docs.redhat.com/en/documentation/red_hat_openstack_platform/16.0/html/networking_guide/advanced_configuration |
15.7. Actions | 15.7. Actions 15.7.1. Start Virtual Machine Action The start action launches a stopped, shutdown, or suspended virtual machine. Example 15.46. Action to start a virtual machine The start action allows a vm element to be provided as a parameter. If a vm element is provided, the virtual machine uses the values from the provided element and overrides system settings at start time. Using the start action with the vm element in REST API is equivalent to using the Run Once window in the Administration or User Portal. These settings persist until a user stops the virtual machine. Examples of these elements include os , domain , placement_policy , cdroms , stateless and display type . Example 15.47. Action to start a virtual machine with overridden parameters Note The domain element is used for Windows systems only for overriding parameters on boot with the start action. The domain element determines the domain that the Windows virtual machine joins. If the domain does not exist in the domains collection, this element requires additional user authentication details, including a user_name and password . If the domain exists in the domains collection, the action requires no additional user authentication details. The CD image and floppy disk file must be available in the ISO domain already. If not, use the ISO uploader tool to upload the files. See The ISO Uploader Tool for more information. 15.7.2. Start Virtual Machine with Cloud-Init Action Cloud-Init is a tool for automating the initial setup of virtual machines. You can use the tool to configure the host name, network interfaces, the DNS service, authorized keys, and set user names and passwords. You can also use the custom_script tag to specify a custom script to run on the virtual machine when it boots. Note The cloud-init element can only be used to start virtual machines with the cloud-init package installed. When the cloud-init element is used, any element within the initialization element but outside the cloud-init element will be ignored. Example 15.48. Action to start a virtual machine using Cloud-Init This example shows you how to start a virtual machine using the Cloud-Init tool to set the host name, change the root password, set a static IP for the eth0 interface, configure DNS, and add an SSH key for the root user. 15.7.3. Stop Virtual Machine Action The stop action forces a virtual machine to power-off. Example 15.49. Action to stop a virtual machine 15.7.4. Shutdown Virtual Machine Action The shutdown action sends a shutdown request to a virtual machine. Example 15.50. Action to send a shutdown request to a virtual machine 15.7.5. Suspend Virtual Machine Action The suspend action saves the virtual machine state to disk and stops it. Start a suspended virtual machine and restore the virtual machine state with the start action. Example 15.51. Action to save virtual machine state and suspend the machine 15.7.6. Reboot Virtual Machine Action The reboot action sends a reboot request to a virtual machine. Example 15.52. Action to send a reboot request to a virtual machine 15.7.7. Enable user logon to access a virtual machine from an external console The logon action enables users to access a virtual machine from consoles outside of the Red Hat Virtualization environment. This action requires the ovirt-guest-agent-gdm-plugin and the ovirt-guest-agent-pam-module packages to be installed and the ovirt-guest-agent service to be running on the virtual machine. Users require the appropriate user permissions for the virtual machine in order to access the virtual machine from an external console. Example 15.53. Logging onto a virtual machine 15.7.8. Detach Virtual Machine from Pool Action The detach action detaches a virtual machine from a pool. Example 15.54. Action to detach a virtual machine 15.7.9. Migrate Virtual Machine Action The migrate action migrates a virtual machine to another physical host. The destination host element is an optional element as Red Hat Virtualization Manager automatically selects a default host for migration. If an API user requires a specific host , the user can specify the host with either an id or name parameter. Example 15.55. Action to migrate a virtual machine to another host 15.7.10. Cancel Virtual Machine Migration Action The cancel migration action stops any migration of a virtual machine to another physical host. Example 15.56. Action to cancel migration of a virtual machine to another host 15.7.11. Export Virtual Machine Action Note The export storage domain is deprecated. Storage data domains can be unattached from a data center and imported to another data center in the same environment, or in a different environment. Virtual machines, floating virtual disk images, and templates can then be uploaded from the imported storage domain to the attached data center. See the Importing Existing Storage Domains section in the Red Hat Virtualization Administration Guide for information on importing storage domains. The export action exports a virtual machine to an export storage domain. A destination storage domain must be specified with a storage_domain reference. The export action reports a failed action if a virtual machine of the same name exists in the destination domain. Set the exclusive parameter to true to change this behavior and overwrite any existing virtual machine. If snapshots of the virtual machine are not included with the exported virtual machine, set the discard_snapshots parameter to true . Example 15.57. Action to export a virtual machine to an export storage domain 15.7.12. Virtual Machine Ticket Action The ticket action generates a time-sensitive authentication token for accessing a virtual machine's display. The client-provided action optionally includes a ticket representation containing a value (if the token string needs to take on a particular form) and/or an expiry time in minutes. In any case, the response specifies the actual ticket value and expiry used. Example 15.58. Action to generate authentication token for a virtual machine 15.7.13. Force Remove Virtual Machine Action An API user forces the removal of a faulty virtual machine with the force action. This action requires a DELETE method. The request body contains an action representation with the force parameter set to true . The request also requires an additional Content-type: application/xml header to process the XML representation in the body. Example 15.59. Force remove action on a virtual machine 15.7.14. Freeze Virtual Machine Filesystems Action The freezefilesystems action freezes a virtual machine's filesystems using the QEMU guest agent when taking a live snapshot of a running virtual machine. Normally, this is done automatically by the Manager, but this must be executed manually with the REST API for virtual machines using OpenStack Volume (Cinder) disks. Freezing the filesystems on the guest operating system ensures a consistent snapshot. Once the snapshot is finished, the guest filesystems must then be thawed. On virtual machines not using a OpenStack Volume disk, the freezing and thawing actions can also be invoked manually using the REST API, which can be useful in the case of a failure during the snapshot process. Example 15.60. Action to freeze a virtual machine's filesystems For more information on snapshots, see Section 15.6.5.1, "Snapshots Sub-Collection" or the Snapshots section in the Red Hat Virtualization Virtual Machine Management Guide . 15.7.15. Thaw Virtual Machine Filesystems Action The thawfilesystems action thaws a virtual machine's filesystems using the QEMU guest agent when taking a live snapshot of a running virtual machine. Normally, this is done automatically by the Manager, but this must be executed manually with the REST API for virtual machines using OpenStack Volume (Cinder) disks. Freezing the filesystems on the guest operating system ensures a consistent snapshot. Once the snapshot is finished, the guest filesystems must then be thawed. On virtual machines not using a OpenStack Volume disk, the freezing and thawing actions can also be invoked manually using the REST API, which can be useful in the case of a failure during the snapshot process. For example, if the virtual machine became unresponsive during thaw, you can execute the thaw operation again manually; otherwise the virtual machine may remain unresponsive. Example 15.61. Action to thaw a virtual machine's filesystems For more information on snapshots, see Section 15.6.5.1, "Snapshots Sub-Collection" or the Snapshots section in the Red Hat Virtualization Virtual Machine Management Guide . | [
"POST /ovirt-engine/api/vms/5114bb3e-a4e6-44b2-b783-b3eea7d84720/start HTTP/1.1 Accept: application/xml Content-type: application/xml <action/>",
"POST /ovirt-engine/api/vms/5114bb3e-a4e6-44b2-b783-b3eea7d84720/start HTTP/1.1 Accept: application/xml Content-type: application/xml <action> <pause>true</pause> <vm> <stateless>true</stateless> <display> <type>spice</type> </display> <os> <boot dev=\"cdrom\"/> </os> <cdroms> <cdrom> <file id=\"windows-xp.iso\"/> </cdrom> </cdroms> <floppies> <floppy> <file id=\"virtio-win_x86.vfd\"/> </floppy> </floppies> <domain> <name>domain.example.com</name> <user> <user_name>domain_user</user_name> <password>domain_password</password> </user> </domain> <placement_policy> <host id=\"02447ac6-bcba-448d-ba2b-f0f453544ed2\"/> </placement_policy> </vm> </action>",
"POST /ovirt-engine/api/vms/5114bb3e-a4e6-44b2-b783-b3eea7d84720/start HTTP/1.1 Accept: application/xml Content-type: application/xml <action> <vm> <initialization> <cloud_init> <host> <address>MyHost.MyDomain.com</address> </host> <users> <user> <user_name>root</user_name> <password>p@55w0rd!</password> </user> </users> <network_configuration> <nics> <nic> <name>eth0</name> <boot_protocol>static</boot_protocol> <network> <ip address=\"192.168.122.31\" netmask=\"255.255.255.0\" gateway=\"192.168.122.1\"/> </network> <on_boot>true</on_boot> </nic> </nics> <dns> <servers> <host> <address>192.168.122.1</address> </host> </servers> <search_domains> <host> <address>MyDomain.com</address> </host> </search_domains> </dns> </network_configuration> <authorized_keys> <authorized_key> <user> <user_name>root</user_name> </user> <key>ssh-rsa AAAAB3Nza[...]75zkdD [email protected]</key> </authorized_key> </authorized_keys> </cloud_init> <custom_script><![CDATA[your script]]></custom_script> </initialization> </vm> </action>",
"POST /ovirt-engine/api/vms/5114bb3e-a4e6-44b2-b783-b3eea7d84720/stop HTTP/1.1 Accept: application/xml Content-type: application/xml <action/>",
"POST /ovirt-engine/api/vms/5114bb3e-a4e6-44b2-b783-b3eea7d84720/shutdown HTTP/1.1 Accept: application/xml Content-type: application/xml <action/>",
"POST /ovirt-engine/api/vms/5114bb3e-a4e6-44b2-b783-b3eea7d84720/suspend HTTP/1.1 Accept: application/xml Content-type: application/xml <action/>",
"POST /ovirt-engine/api/vms/5114bb3e-a4e6-44b2-b783-b3eea7d84720/reboot HTTP/1.1 Accept: application/xml Content-type: application/xml <action/>",
"POST /ovirt-engine/api/vms/5114bb3e-a4e6-44b2-b783-b3eea7d84720/logon HTTP/1.1 Content-Type: application/json Content-Length: 2 {}",
"POST /ovirt-engine/api/vms/5114bb3e-a4e6-44b2-b783-b3eea7d84720/detach HTTP/1.1 Accept: application/xml Content-type: application/xml <action/>",
"POST /ovirt-engine/api/vms/5114bb3e-a4e6-44b2-b783-b3eea7d84720/migrate HTTP/1.1 Accept: application/xml Content-type: application/xml <action> <host id=\"2ab5e1da-b726-4274-bbf7-0a42b16a0fc3\"/> </action>",
"POST /ovirt-engine/api/vms/5114bb3e-a4e6-44b2-b783-b3eea7d84720/cancelmigration HTTP/1.1 Accept: application/xml Content-type: application/xml <action/>",
"POST /ovirt-engine/api/vms/5114bb3e-a4e6-44b2-b783-b3eea7d84720/export HTTP/1.1 Accept: application/xml Content-type: application/xml <action> <storage_domain> <name>export1</name> </storage_domain> <exclusive>true</exclusive> <discard_snapshots>true</discard_snapshots> </action>",
"POST /ovirt-engine/api/vms/5114bb3e-a4e6-44b2-b783-b3eea7d84720/ticket HTTP/1.1 Accept: application/xml Content-type: application/xml <action> <ticket> <expiry>120</expiry> </ticket> </action> 200 OK Content-Type: application/xml <action id=\"94e07552-14ba-4c27-8ce6-2cc75190d3ef\" href=\"/ovirt-engine/api/vms/5114bb3e-a4e6-44b2-b783-b3eea7d84720/ticket/ 94e07552-14ba-4c27-8ce6-2cc75190d3ef\"> <status> <state>complete</state> </status> <ticket> <value>5c7CSzK8Sw41</value> <expiry>120</expiry> </ticket> <link rel=\"parent\" href=\"/ovirt-engine/api/vms/5114bb3e-a4e6-44b2-b783-b3eea7d84720\"/> <link rel=\"replay\" href=\"/ovirt-engine/api/vms/5114bb3e-a4e6-44b2-b783-b3eea7d84720/ticket\"/> </action>",
"DELETE /ovirt-engine/api/vms/5114bb3e-a4e6-44b2-b783-b3eea7d84720 HTTP/1.1 Accept: application/xml Content-type: application/xml <action> <force>true</force> </action>",
"POST /ovirt-engine/api/vms/5114bb3e-a4e6-44b2-b783-b3eea7d84720/freezefilesystems HTTP/1.1 Accept: application/xml Content-type: application/xml <action/>",
"POST /ovirt-engine/api/vms/5114bb3e-a4e6-44b2-b783-b3eea7d84720/thawfilesystems HTTP/1.1 Accept: application/xml Content-type: application/xml <action/>"
] | https://docs.redhat.com/en/documentation/red_hat_virtualization/4.3/html/version_3_rest_api_guide/sect-Actions |
14.3.3. Domain Controller | 14.3.3. Domain Controller A domain controller in Windows NT is functionally similar to a Network Information Service (NIS) server in a Linux environment. Domain controllers and NIS servers both host user/group information databases as well as related services. Domain controllers are mainly used for security, including the authentication of users accessing domain resources. The service that maintains the user/group database integrity is called the Security Account Manager (SAM). The SAM database is stored differently between Windows and Linux Samba-based systems, therefore SAM replication cannot be achieved and platforms cannot be mixed in a PDC/BDC environment. In a Samba environment, there can be only one PDC and zero or more BDCs. Important Samba cannot exist in a mixed Samba/Windows domain controller environment (Samba cannot be a BDC of a Windows PDC or vice versa). Alternatively, Samba PDCs and BDCs can coexist. 14.3.3.1. Primary Domain Controller (PDC) using tdbsam The simplest and most common implementation of a Samba PDC uses the tdbsam password database backend. Planned to replace the aging smbpasswd backend, tdbsam has numerous improvements that are explained in more detail in Section 14.5, " Samba Account Information Databases " . The passdb backend directive controls which backend is to be used for the PDC. Note If you need more than one domain controller or have more than 250 users, do not use a tdbsam authentication backend. LDAP is recommended in these cases. | [
"[global] workgroup = DOCS netbios name = DOCS_SRV passdb backend = tdbsam security = user add user script = /usr/sbin/useradd -m %u delete user script = /usr/sbin/userdel -r %u add group script = /usr/sbin/groupadd %g delete group script = /usr/sbin/groupdel %g add user to group script = /usr/sbin/usermod -G %g %u add machine script = /usr/sbin/useradd -s /bin/false -d /dev/null -g machines %u The following specifies the default logon script Per user logon scripts can be specified in the user account using pdbedit logon script = logon.bat This sets the default profile path. Set per user paths with pdbedit logon path = \\\\%L\\Profiles\\%U logon drive = H: logon home = \\\\%L\\%U domain logons = Yes os level = 35 preferred master = Yes domain master = Yes idmap uid = 15000-20000 idmap gid = 15000-20000 [homes] comment = Home Directories valid users = %S read only = No browseable = No writable = Yes [public] comment = Data path = /export force user = docsbot force group = users guest ok = Yes [netlogon] comment = Network Logon Service path = /var/lib/samba/netlogon/scripts admin users = ed, john, sam guest ok = No browseable = No writable = No For profiles to work, create a user directory under the path shown. mkdir -p /var/lib/samba/profiles/john [Profiles] comment = Roaming Profile Share path = /var/lib/samba/profiles read only = No browseable = No guest ok = Yes profile acls = Yes Other resource shares"
] | https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/4/html/reference_guide/s2-samba-domain-controller |
Chapter 11. Compiler and Tools | Chapter 11. Compiler and Tools OpenJDK8 Red Hat Enterprise Linux 6.6 features the java-1.8.0-openjdk packages, which contain the latest version of the Open Java Development Kit, OpenJDK8, that is now fully supported. These packages provide a fully compliant implementation of Java SE 8 and may be used in parallel with the existing java-1.7.0-openjdk packages, which remain available in Red Hat Enterprise Linux 6.6. Java 8 brings numerous new improvements, such as Lambda expressions, default methods, a new Stream API for collections, JDBC 4.2, hardware AES support, and much more. In addition to these, OpenJDK8 contains numerous other performance updates and bug fixes. | null | https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/6.6_release_notes/chap-compiler_and_tools |
Chapter 6. View OpenShift Data Foundation Topology | Chapter 6. View OpenShift Data Foundation Topology The topology shows the mapped visualization of the OpenShift Data Foundation storage cluster at various abstraction levels and also lets you to interact with these layers. The view also shows how the various elements compose the Storage cluster altogether. Procedure On the OpenShift Web Console, navigate to Storage Data Foundation Topology . The view shows the storage cluster and the zones inside it. You can see the nodes depicted by circular entities within the zones, which are indicated by dotted lines. The label of each item or resource contains basic information such as status and health or indication for alerts. Choose a node to view node details on the right-hand panel. You can also access resources or deployments within a node by clicking on the search/preview decorator icon. To view deployment details Click the preview decorator on a node. A modal window appears above the node that displays all of the deployments associated with that node along with their statuses. Click the Back to main view button in the model's upper left corner to close and return to the view. Select a specific deployment to see more information about it. All relevant data is shown in the side panel. Click the Resources tab to view the pods information. This tab provides a deeper understanding of the problems and offers granularity that aids in better troubleshooting. Click the pod links to view the pod information page on OpenShift Container Platform. The link opens in a new window. | null | https://docs.redhat.com/en/documentation/red_hat_openshift_data_foundation/4.18/html/deploying_openshift_data_foundation_on_vmware_vsphere/viewing-odf-topology_rhodf |
Chapter 12. Integrations | Chapter 12. Integrations 12.1. Integrating Serverless with the cost management service Cost management is an OpenShift Container Platform service that enables you to better understand and track costs for clouds and containers. It is based on the open source Koku project. 12.1.1. Prerequisites You have cluster administrator permissions. You have set up cost management and added an OpenShift Container Platform source . 12.1.2. Using labels for cost management queries Labels, also known as tags in cost management, can be applied for nodes, namespaces or pods. Each label is a key and value pair. You can use a combination of multiple labels to generate reports. You can access reports about costs by using the Red Hat hybrid console . Labels are inherited from nodes to namespaces, and from namespaces to pods. However, labels are not overridden if they already exist on a resource. For example, Knative services have a default app=<revision_name> label: Example Knative service default label apiVersion: serving.knative.dev/v1 kind: Service metadata: name: example-service spec: ... labels: app: <revision_name> ... If you define a label for a namespace, such as app=my-domain , the cost management service does not take into account costs coming from a Knative service with the tag app=<revision_name> when querying the application using the app=my-domain tag. Costs for Knative services that have this tag must be queried under the app=<revision_name> tag. 12.1.3. Additional resources Configure tagging for your sources Use the Cost Explorer to visualize your costs 12.2. Using NVIDIA GPU resources with serverless applications NVIDIA supports experimental use of GPU resources on OpenShift Container Platform. See OpenShift Container Platform on NVIDIA GPU accelerated clusters for more information about setting up GPU resources on OpenShift Container Platform. 12.2.1. Specifying GPU requirements for a service After GPU resources are enabled for your OpenShift Container Platform cluster, you can specify GPU requirements for a Knative service using the Knative ( kn ) CLI. Prerequisites The OpenShift Serverless Operator, Knative Serving and Knative Eventing are installed on the cluster. You have installed the Knative ( kn ) CLI. GPU resources are enabled for your OpenShift Container Platform cluster. You have created a project or have access to a project with the appropriate roles and permissions to create applications and other workloads in OpenShift Container Platform. Note Using NVIDIA GPU resources is not supported for IBM Z and IBM Power Systems. Procedure Create a Knative service and set the GPU resource requirement limit to 1 by using the --limit nvidia.com/gpu=1 flag: USD kn service create hello --image <service-image> --limit nvidia.com/gpu=1 A GPU resource requirement limit of 1 means that the service has 1 GPU resource dedicated. Services do not share GPU resources. Any other services that require GPU resources must wait until the GPU resource is no longer in use. A limit of 1 GPU also means that applications exceeding usage of 1 GPU resource are restricted. If a service requests more than 1 GPU resource, it is deployed on a node where the GPU resource requirements can be met. Optional. For an existing service, you can change the GPU resource requirement limit to 3 by using the --limit nvidia.com/gpu=3 flag: USD kn service update hello --limit nvidia.com/gpu=3 12.2.2. Additional resources Setting resource quotas for extended resources | [
"apiVersion: serving.knative.dev/v1 kind: Service metadata: name: example-service spec: labels: app: <revision_name>",
"kn service create hello --image <service-image> --limit nvidia.com/gpu=1",
"kn service update hello --limit nvidia.com/gpu=3"
] | https://docs.redhat.com/en/documentation/openshift_container_platform/4.7/html/serverless/integrations |
probe::signal.wakeup | probe::signal.wakeup Name probe::signal.wakeup - Sleeping process being wakened for signal Synopsis Values resume Indicates whether to wake up a task in a STOPPED or TRACED state state_mask A string representation indicating the mask of task states to wake. Possible values are TASK_INTERRUPTIBLE, TASK_STOPPED, TASK_TRACED, and TASK_INTERRUPTIBLE. pid_name Name of the process to wake sig_pid The PID of the process to wake | [
"signal.wakeup"
] | https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/systemtap_tapset_reference/api-signal-wakeup |
Chapter 3. December 2024 | Chapter 3. December 2024 3.1. Red Hat Insights for Red Hat Enterprise Linux 3.1.1. General Updated Registration Assistant experience An updated experience of the Registration Assistant is now available in production. Key improvements include: A streamlined and simplified experience that allows a copy-paste command that gets you started with minimal effort. A more concise and focused user interface that reduces unnecessary text and contains more relevant content. A more comprehensive experience that allows you to use activation keys, and provides a recommendation for your organization to adopt remote host configuration (RHC) with RHEL 9.0 and later. Note Procedures referencing basic authentication are no longer included in the Registration Assistant because of end-of-life (EOL) for that method of authentication. 3.2. Advisor New recommendations The Red Hat Insights advisor service now detects and recommends fixes for critical issues including: Kernel crashing after reboot when the CIFS filesystem is mounted or when using ethtool with the ice network interface driver, Kernel panic after reboot when the GFS2 filesystem is mounted with quota enabled Boot failure when the RHEL system is running on the Microsoft Hyper-V 2016 platform. The advisor service now detects and recommends solutions for the following issues: Kernel crash occurs after reboot due to a write callback exception when CIFS filesystem is mounted Kernel crash occurs after reboot when using ethtool with the ice network interface driver Kernel panic occurs on the edge computing system after reboot when the GFS2 filesystem is mounted with quota enabled UEFI VM of Windows Hyper-V 2016 server does not boot since kernel-5.14.0-407.el9 The Ceph Metadata Server will get stuck and report slow requests due to a known issue in the default kernel of CephFS Clients on edge computing systems The network throughput performance will decrease after reboot when "virtio-net" is used on RHEL 9.5 edge computing guests due to a bug in the default kernel [RHEL-9] dump fails on azure guest [M416s v2] 3.3. Content templates (replacing Patch templates) Content templates are available in preview Patching connected RHEL systems with content management is now available in Insights Preview, with the introduction of content templates. Content templates replace patch templates, by using a RHEL version and a date to create a defined set of packages and advisories that you can install in your environment. You can further customize this content definition with snapshots of custom repositories. Pairing the control of content templates with the patching orchestration of Ansible Automation Platform, offers an ideal patching solution for your connected systems. You can continue taking advantage of Red Hat Satellite's advanced content management capabilities to manage your RHEL systems, including content caching and content views. Content templates are a simplified form of content management designed for you when your systems connect directly to Red Hat services and the Red Hat Content Delivery Network (CDN). To get started using content templates, do the following: Make sure you have Organization Administrator permissions or have the Content Template administrator role assigned in User Access. Navigate to Insights > Content > Templates , and log in, if needed. Click the Preview toggle to enable preview features. Additional resources User Access Configuration Guide for Role-based Access Control (RBAC) " Managing content templates " in Deploying and managing RHEL systems in hybrid clouds Transition of Red Hat Insights patch templates to content templates | null | https://docs.redhat.com/en/documentation/red_hat_insights/1-latest/html/release_notes/december-2024 |
Chapter 4. Installing a cluster quickly on Azure | Chapter 4. Installing a cluster quickly on Azure In OpenShift Container Platform version 4.16, you can install a cluster on Microsoft Azure that uses the default configuration options. 4.1. Prerequisites You reviewed details about the OpenShift Container Platform installation and update processes. You read the documentation on selecting a cluster installation method and preparing it for users . You configured an Azure account to host the cluster and determined the tested and validated region to deploy the cluster to. If you use a firewall, you configured it to allow the sites that your cluster requires access to. 4.2. Internet access for OpenShift Container Platform In OpenShift Container Platform 4.16, you require access to the internet to install your cluster. You must have internet access to: Access OpenShift Cluster Manager to download the installation program and perform subscription management. If the cluster has internet access and you do not disable Telemetry, that service automatically entitles your cluster. Access Quay.io to obtain the packages that are required to install your cluster. Obtain the packages that are required to perform cluster updates. Important If your cluster cannot have direct internet access, you can perform a restricted network installation on some types of infrastructure that you provision. During that process, you download the required content and use it to populate a mirror registry with the installation packages. With some installation types, the environment that you install your cluster in will not require internet access. Before you update the cluster, you update the content of the mirror registry. 4.3. Generating a key pair for cluster node SSH access During an OpenShift Container Platform installation, you can provide an SSH public key to the installation program. The key is passed to the Red Hat Enterprise Linux CoreOS (RHCOS) nodes through their Ignition config files and is used to authenticate SSH access to the nodes. The key is added to the ~/.ssh/authorized_keys list for the core user on each node, which enables password-less authentication. After the key is passed to the nodes, you can use the key pair to SSH in to the RHCOS nodes as the user core . To access the nodes through SSH, the private key identity must be managed by SSH for your local user. If you want to SSH in to your cluster nodes to perform installation debugging or disaster recovery, you must provide the SSH public key during the installation process. The ./openshift-install gather command also requires the SSH public key to be in place on the cluster nodes. Important Do not skip this procedure in production environments, where disaster recovery and debugging is required. Note You must use a local key, not one that you configured with platform-specific approaches such as AWS key pairs . Procedure If you do not have an existing SSH key pair on your local machine to use for authentication onto your cluster nodes, create one. For example, on a computer that uses a Linux operating system, run the following command: USD ssh-keygen -t ed25519 -N '' -f <path>/<file_name> 1 1 Specify the path and file name, such as ~/.ssh/id_ed25519 , of the new SSH key. If you have an existing key pair, ensure your public key is in the your ~/.ssh directory. Note If you plan to install an OpenShift Container Platform cluster that uses the RHEL cryptographic libraries that have been submitted to NIST for FIPS 140-2/140-3 Validation on only the x86_64 , ppc64le , and s390x architectures, do not create a key that uses the ed25519 algorithm. Instead, create a key that uses the rsa or ecdsa algorithm. View the public SSH key: USD cat <path>/<file_name>.pub For example, run the following to view the ~/.ssh/id_ed25519.pub public key: USD cat ~/.ssh/id_ed25519.pub Add the SSH private key identity to the SSH agent for your local user, if it has not already been added. SSH agent management of the key is required for password-less SSH authentication onto your cluster nodes, or if you want to use the ./openshift-install gather command. Note On some distributions, default SSH private key identities such as ~/.ssh/id_rsa and ~/.ssh/id_dsa are managed automatically. If the ssh-agent process is not already running for your local user, start it as a background task: USD eval "USD(ssh-agent -s)" Example output Agent pid 31874 Note If your cluster is in FIPS mode, only use FIPS-compliant algorithms to generate the SSH key. The key must be either RSA or ECDSA. Add your SSH private key to the ssh-agent : USD ssh-add <path>/<file_name> 1 1 Specify the path and file name for your SSH private key, such as ~/.ssh/id_ed25519 Example output Identity added: /home/<you>/<path>/<file_name> (<computer_name>) steps When you install OpenShift Container Platform, provide the SSH public key to the installation program. 4.4. Obtaining the installation program Before you install OpenShift Container Platform, download the installation file on the host you are using for installation. Prerequisites You have a computer that runs Linux or macOS, with at least 1.2 GB of local disk space. Procedure Go to the Cluster Type page on the Red Hat Hybrid Cloud Console. If you have a Red Hat account, log in with your credentials. If you do not, create an account. Tip You can also download the binaries for a specific OpenShift Container Platform release . Select your infrastructure provider from the Run it yourself section of the page. Select your host operating system and architecture from the dropdown menus under OpenShift Installer and click Download Installer . Place the downloaded file in the directory where you want to store the installation configuration files. Important The installation program creates several files on the computer that you use to install your cluster. You must keep the installation program and the files that the installation program creates after you finish installing the cluster. Both of the files are required to delete the cluster. Deleting the files created by the installation program does not remove your cluster, even if the cluster failed during installation. To remove your cluster, complete the OpenShift Container Platform uninstallation procedures for your specific cloud provider. Extract the installation program. For example, on a computer that uses a Linux operating system, run the following command: USD tar -xvf openshift-install-linux.tar.gz Download your installation pull secret from Red Hat OpenShift Cluster Manager . This pull secret allows you to authenticate with the services that are provided by the included authorities, including Quay.io, which serves the container images for OpenShift Container Platform components. Tip Alternatively, you can retrieve the installation program from the Red Hat Customer Portal , where you can specify a version of the installation program to download. However, you must have an active subscription to access this page. 4.5. Deploying the cluster You can install OpenShift Container Platform on a compatible cloud platform. Important You can run the create cluster command of the installation program only once, during initial installation. Prerequisites You have configured an account with the cloud platform that hosts your cluster. You have the OpenShift Container Platform installation program and the pull secret for your cluster. You have an Azure subscription ID and tenant ID. You have the application ID and password of a service principal. Procedure Optional: If you have run the installation program on this computer before, and want to use an alternative service principal, go to the ~/.azure/ directory and delete the osServicePrincipal.json configuration file. Deleting this file prevents the installation program from automatically reusing subscription and authentication values from a installation. Change to the directory that contains the installation program and initialize the cluster deployment: USD ./openshift-install create cluster --dir <installation_directory> \ 1 --log-level=info 2 1 For <installation_directory> , specify the directory name to store the files that the installation program creates. 2 To view different installation details, specify warn , debug , or error instead of info . When specifying the directory: Verify that the directory has the execute permission. This permission is required to run Terraform binaries under the installation directory. Use an empty directory. Some installation assets, such as bootstrap X.509 certificates, have short expiration intervals, therefore you must not reuse an installation directory. If you want to reuse individual files from another cluster installation, you can copy them into your directory. However, the file names for the installation assets might change between releases. Use caution when copying installation files from an earlier OpenShift Container Platform version. Provide values at the prompts: Optional: Select an SSH key to use to access your cluster machines. Note For production OpenShift Container Platform clusters on which you want to perform installation debugging or disaster recovery, specify an SSH key that your ssh-agent process uses. Select azure as the platform to target. If the installation program cannot locate the osServicePrincipal.json configuration file from a installation, you are prompted for Azure subscription and authentication values. Specify the following Azure parameter values for your subscription and service principal: azure subscription id : Enter the subscription ID to use for the cluster. azure tenant id : Enter the tenant ID. azure service principal client id : Enter its application ID. azure service principal client secret : Enter its password. Select the region to deploy the cluster to. Select the base domain to deploy the cluster to. The base domain corresponds to the Azure DNS Zone that you created for your cluster. Enter a descriptive name for your cluster. Important All Azure resources that are available through public endpoints are subject to resource name restrictions, and you cannot create resources that use certain terms. For a list of terms that Azure restricts, see Resolve reserved resource name errors in the Azure documentation. Paste the pull secret from Red Hat OpenShift Cluster Manager . If previously not detected, the installation program creates an osServicePrincipal.json configuration file and stores this file in the ~/.azure/ directory on your computer. This ensures that the installation program can load the profile when it is creating an OpenShift Container Platform cluster on the target platform. Verification When the cluster deployment completes successfully: The terminal displays directions for accessing your cluster, including a link to the web console and credentials for the kubeadmin user. Credential information also outputs to <installation_directory>/.openshift_install.log . Important Do not delete the installation program or the files that the installation program creates. Both are required to delete the cluster. Example output ... INFO Install complete! INFO To access the cluster as the system:admin user when using 'oc', run 'export KUBECONFIG=/home/myuser/install_dir/auth/kubeconfig' INFO Access the OpenShift web-console here: https://console-openshift-console.apps.mycluster.example.com INFO Login to the console with user: "kubeadmin", and password: "password" INFO Time elapsed: 36m22s Important The Ignition config files that the installation program generates contain certificates that expire after 24 hours, which are then renewed at that time. If the cluster is shut down before renewing the certificates and the cluster is later restarted after the 24 hours have elapsed, the cluster automatically recovers the expired certificates. The exception is that you must manually approve the pending node-bootstrapper certificate signing requests (CSRs) to recover kubelet certificates. See the documentation for Recovering from expired control plane certificates for more information. It is recommended that you use Ignition config files within 12 hours after they are generated because the 24-hour certificate rotates from 16 to 22 hours after the cluster is installed. By using the Ignition config files within 12 hours, you can avoid installation failure if the certificate update runs during installation. 4.6. Installing the OpenShift CLI You can install the OpenShift CLI ( oc ) to interact with OpenShift Container Platform from a command-line interface. You can install oc on Linux, Windows, or macOS. Important If you installed an earlier version of oc , you cannot use it to complete all of the commands in OpenShift Container Platform 4.16. Download and install the new version of oc . Installing the OpenShift CLI on Linux You can install the OpenShift CLI ( oc ) binary on Linux by using the following procedure. Procedure Navigate to the OpenShift Container Platform downloads page on the Red Hat Customer Portal. Select the architecture from the Product Variant drop-down list. Select the appropriate version from the Version drop-down list. Click Download Now to the OpenShift v4.16 Linux Clients entry and save the file. Unpack the archive: USD tar xvf <file> Place the oc binary in a directory that is on your PATH . To check your PATH , execute the following command: USD echo USDPATH Verification After you install the OpenShift CLI, it is available using the oc command: USD oc <command> Installing the OpenShift CLI on Windows You can install the OpenShift CLI ( oc ) binary on Windows by using the following procedure. Procedure Navigate to the OpenShift Container Platform downloads page on the Red Hat Customer Portal. Select the appropriate version from the Version drop-down list. Click Download Now to the OpenShift v4.16 Windows Client entry and save the file. Unzip the archive with a ZIP program. Move the oc binary to a directory that is on your PATH . To check your PATH , open the command prompt and execute the following command: C:\> path Verification After you install the OpenShift CLI, it is available using the oc command: C:\> oc <command> Installing the OpenShift CLI on macOS You can install the OpenShift CLI ( oc ) binary on macOS by using the following procedure. Procedure Navigate to the OpenShift Container Platform downloads page on the Red Hat Customer Portal. Select the appropriate version from the Version drop-down list. Click Download Now to the OpenShift v4.16 macOS Clients entry and save the file. Note For macOS arm64, choose the OpenShift v4.16 macOS arm64 Client entry. Unpack and unzip the archive. Move the oc binary to a directory on your PATH. To check your PATH , open a terminal and execute the following command: USD echo USDPATH Verification Verify your installation by using an oc command: USD oc <command> 4.7. Logging in to the cluster by using the CLI You can log in to your cluster as a default system user by exporting the cluster kubeconfig file. The kubeconfig file contains information about the cluster that is used by the CLI to connect a client to the correct cluster and API server. The file is specific to a cluster and is created during OpenShift Container Platform installation. Prerequisites You deployed an OpenShift Container Platform cluster. You installed the oc CLI. Procedure Export the kubeadmin credentials: USD export KUBECONFIG=<installation_directory>/auth/kubeconfig 1 1 For <installation_directory> , specify the path to the directory that you stored the installation files in. Verify you can run oc commands successfully using the exported configuration: USD oc whoami Example output system:admin Additional resources See Accessing the web console for more details about accessing and understanding the OpenShift Container Platform web console. 4.8. Telemetry access for OpenShift Container Platform In OpenShift Container Platform 4.16, the Telemetry service, which runs by default to provide metrics about cluster health and the success of updates, requires internet access. If your cluster is connected to the internet, Telemetry runs automatically, and your cluster is registered to OpenShift Cluster Manager . After you confirm that your OpenShift Cluster Manager inventory is correct, either maintained automatically by Telemetry or manually by using OpenShift Cluster Manager, use subscription watch to track your OpenShift Container Platform subscriptions at the account or multi-cluster level. Additional resources See About remote health monitoring for more information about the Telemetry service 4.9. steps Customize your cluster . If necessary, you can opt out of remote health reporting . | [
"ssh-keygen -t ed25519 -N '' -f <path>/<file_name> 1",
"cat <path>/<file_name>.pub",
"cat ~/.ssh/id_ed25519.pub",
"eval \"USD(ssh-agent -s)\"",
"Agent pid 31874",
"ssh-add <path>/<file_name> 1",
"Identity added: /home/<you>/<path>/<file_name> (<computer_name>)",
"tar -xvf openshift-install-linux.tar.gz",
"./openshift-install create cluster --dir <installation_directory> \\ 1 --log-level=info 2",
"INFO Install complete! INFO To access the cluster as the system:admin user when using 'oc', run 'export KUBECONFIG=/home/myuser/install_dir/auth/kubeconfig' INFO Access the OpenShift web-console here: https://console-openshift-console.apps.mycluster.example.com INFO Login to the console with user: \"kubeadmin\", and password: \"password\" INFO Time elapsed: 36m22s",
"tar xvf <file>",
"echo USDPATH",
"oc <command>",
"C:\\> path",
"C:\\> oc <command>",
"echo USDPATH",
"oc <command>",
"export KUBECONFIG=<installation_directory>/auth/kubeconfig 1",
"oc whoami",
"system:admin"
] | https://docs.redhat.com/en/documentation/openshift_container_platform_installation/4.16/html/installing_on_azure/installing-azure-default |
Chapter 1. Support policy for Eclipse Temurin | Chapter 1. Support policy for Eclipse Temurin Red Hat will support select major versions of Eclipse Temurin in its products. For consistency, these are the same versions that Oracle designates as long-term support (LTS) for the Oracle JDK. A major version of Eclipse Temurin will be supported for a minimum of six years from the time that version is first introduced. For more information, see the Eclipse Temurin Life Cycle and Support Policy . Note RHEL 6 reached the end of life in November 2020. Because of this, Eclipse Temurin does not support RHEL 6 as a supported configuration. | null | https://docs.redhat.com/en/documentation/red_hat_build_of_openjdk/11/html/release_notes_for_eclipse_temurin_11.0.20/rn-openjdk-temurin-support-policy |
Chapter 1. About Red Hat Insights | Chapter 1. About Red Hat Insights Red Hat Insights is a Software-as-a-Service (SaaS) application included with almost every subscription to Red Hat Enterprise Linux, Red Hat OpenShift, and Red Hat Ansible Automation Platform. Powered by predictive analytics, Red Hat Insights gets smarter with every additional piece of intelligence and data. It can automatically discover relevant insights, recommend tailored, proactive, actions, and even automate tasks. Using Red Hat Insights, customers can benefit from the experience and technical knowledge of Red Hat Certified Engineers, making it easier to identify, prioritize and resolve issues before business operations are affected. As a SaaS offering, located at Red Hat Hybrid Cloud Console , Red Hat Insights is regularly updated. Regular updates expand the Insights knowledge archive in real time to reflect new IT challenges that can impact the stability of mission-critical systems. | null | https://docs.redhat.com/en/documentation/red_hat_insights/1-latest/html/release_notes/release-notes-insights |
Chapter 11. Logging | Chapter 11. Logging 11.1. Enabling protocol logging The client can log AMQP protocol frames to the console. This data is often critical when diagnosing problems. To enable protocol logging, set the PN_TRACE_FRM environment variable to 1 : Example: Enabling protocol logging USD export PN_TRACE_FRM=1 USD <your-client-program> To disable protocol logging, unset the PN_TRACE_FRM environment variable. | [
"export PN_TRACE_FRM=1 <your-client-program>"
] | https://docs.redhat.com/en/documentation/red_hat_amq/2021.q3/html/using_the_amq_python_client/logging |
Chapter 6. Deploying a RHEL for Edge image in a network-based environment | Chapter 6. Deploying a RHEL for Edge image in a network-based environment You can deploy a RHEL for Edge image using the RHEL installer graphical user interface or a Kickstart file. The overall process for deploying a RHEL for Edge image depends on whether your deployment environment is network-based or non-network-based. Note To deploy the images on bare metal, use a Kickstart file. Network-based deployments Deploying a RHEL for Edge image in a network-based environment involves the following high-level steps: Extract the image file contents. Set up a web server Install the image 6.1. Extracting the RHEL for Edge image commit After you download the commit, extract the .tar file and note the ref name and the commit ID. The downloaded commit file consists of a .tar file with an OSTree repository. The OSTree repository has a commit and a compose.json file. The compose.json file has information metadata about the commit with information such as the "Ref", the reference ID and the commit ID. The commit ID has the RPM packages. To extract the package contents, perform the following the steps: Prerequisites Create a Kickstart file or use an existing one. Procedure Extract the downloaded image .tar file: Go to the directory where you have extracted the .tar file. It has a compose.json file and an OSTree directory. The compose.json file has the commit number and the OSTree directory has the RPM packages. Open the compose.json file and note the commit ID number. You need this number handy when you proceed to set up a web server. If you have the jq JSON processor installed, you can also retrieve the commit ID by using the jq tool: List the RPM packages in the commit. Use a Kickstart file to run the RHEL installer. Optionally, you can use any existing file or can create one by using the Kickstart Generator tool. In the Kickstart file, ensure that you include the details about how to provision the file system, create a user, and how to fetch and deploy the RHEL for Edge image. The RHEL installer uses this information during the installation process. The following is a Kickstart file example: The OStree-based installation uses the ostreesetup command to set up the configuration. It fetches the OSTree commit, by using the following flags: --nogpg - Disable GNU Privacy Guard (GPG) key verification. --osname - Management root for the operating system installation. --remote - Management root for the operating system installation --url - URL of the repository to install from. --ref - Name of the branch from the repository that the installation uses. --url=http://mirror.example.com/repo/ - is the address of the host system where you extracted the edge commit and served it over nginx . You can use the address to reach the host system from the guest computer. For example, if you extract the commit image in the /var/www/html directory and serve the commit over nginx on a computer whose hostname is www.example.com , the value of the --url parameter is http://www.example.com/repo . Note Use the http protocol to start a service to serve the commit, because https is not enabled on the Apache HTTP Server. Additional resources Downloading a RHEL for Edge image Creating Kickstart files 6.2. Setting up a web server to install RHEL for Edge images After you have extracted the RHEL for Edge image contents, set up a web server to provide the image commit details to the RHEL installer by using HTTP. The following example provides the steps to set up a web server by using a container. Prerequisites You have installed Podman on your system. See the Red Hat Knowledgebase solution How do I install Podman in RHEL . Procedure Create the nginx configuration file with the following instructions: Create a Dockerfile with the following instructions: Where, kickstart.ks is the name of the Kickstart file from the RHEL for Edge image. The Kickstart file includes directive information. To help you manage the images later, it is advisable to include the checks and settings for greenboot checks. For that, you can update the Kickstart file to include the following settings: Any HTTP service can host the OSTree repository, and the example, which uses a container, is just an option for how to do this. The Dockerfile performs the following tasks: Uses the latest Universal Base Image (UBI) Installs the web server (nginx) Adds the Kickstart file to the server Adds the RHEL for Edge image commit to the server Build a Docker container Run the container As a result, the server is set up and ready to start the RHEL Installer by using the commit.tar repository and the Kickstart file. 6.3. Performing an attended installation to an edge device by using Kickstart For an attended installation in a network-based environment, you can install the RHEL for Edge image to a device by using the RHEL Installer ISO, a Kickstart file, and a web server. The web server serves the RHEL for Edge Commit and the Kickstart file to boot the RHEL Installer ISO image. Prerequisites You have made the RHEL for Edge Commit available by running a web server. See Setting up a web server to install RHEL for Edge images . You have created a .qcow2 disk image to be used as the target of the attended installation. See Creating a virtual disk image by using qemu-img . Procedure Create a Kickstart file. The following is an example in which the ostreesetup directive instructs the Anaconda Installer to fetch and deploy the commit. Additionally, it creates a user and a password. Run the RHEL Anaconda Installer by using the libvirt virt-install utility to create a virtual machine (VM) with a RHEL operating system. Use the .qcow2 disk image as the target disk in the attended installation: On the installation screen: Figure 6.1. Red Hat Enterprise Linux boot menu Press the e key to add an additional kernel parameter: The kernel parameter specifies that you want to install RHEL by using the Kickstart file and not the RHEL image contained in the RHEL Installer. After adding the kernel parameters, press Ctrl + X to boot the RHEL installation by using the Kickstart file. The RHEL Installer starts, fetches the Kickstart file from the server (HTTP) endpoint and executes the commands, including the command to install the RHEL for Edge image commit from the HTTP endpoint. After the installation completes, the RHEL Installer prompts you for login details. Verification On the Login screen, enter your user account credentials and click Enter . Verify whether the RHEL for Edge image is successfully installed. USD rpm-ostree status The command output provides the image commit ID and shows that the installation is successful. Following is a sample output: Additional resources How to embed a Kickstart file into an ISO image (Red Hat Knowledgebase) Booting the installation 6.4. Performing an unattended installation to an edge device by using Kickstart For an unattended installation in a network-based environment, you can install the RHEL for Edge image to an Edge device by using a Kickstart file and a web server. The web server serves the RHEL for Edge Commit and the Kickstart file, and both artifacts are used to start the RHEL Installer ISO image. Prerequisites You have the qemu-img utility installed on your host system. You have created a .qcow2 disk image to install the commit you created. See Creating a system image with RHEL image builder in the CLI . You have a running web server. See Creating a RHEL for Edge Container image for non-network-based deployments . Procedure Run a RHEL for Edge Container image to start a web server. The server fetches the commit in the RHEL for Edge Container image and becomes available and running. Run the RHEL Anaconda Installer, passing the customized .qcow2 disk image, by using libvirt virt-install : On the installation screen: Figure 6.2. Red Hat Enterprise Linux boot menu Press the TAB key and add the Kickstart kernel argument: The kernel parameter specifies that you want to install RHEL by using the Kickstart file and not the RHEL image contained in the RHEL Installer. After adding the kernel parameters, press Ctrl + X to boot the RHEL installation by using the Kickstart file. The RHEL Installer starts, fetches the Kickstart file from the server (HTTP) endpoint, and executes the commands, including the command to install the RHEL for Edge image commit from the HTTP endpoint. After the installation completes, the RHEL Installer prompts you for login details. Verification On the Login screen, enter your user account credentials and click Enter . Verify whether the RHEL for Edge image is successfully installed. The command output provides the image commit ID and shows that the installation is successful. The following is a sample output: Additional resources How to embed a Kickstart file into an ISO image (Red Hat Knowledgebase) Booting the installation | [
"tar xvf <UUID> -commit.tar",
"jq '.[\"ostree-commit\"]' < compose.json",
"rpm-ostree db list rhel/8/x86_64/edge --repo=repo",
"lang en_US.UTF-8 keyboard us timezone Etc/UTC --isUtc text zerombr clearpart --all --initlabel autopart reboot user --name=core --group=wheel sshkey --username=core \"ssh-rsa AAAA3Nza... .\" rootpw --lock network --bootproto=dhcp ostreesetup --nogpg --osname=rhel --remote=edge --url=https://mirror.example.com/repo/ --ref=rhel/8/x86_64/edge",
"events { } http { server{ listen 8080; root /usr/share/nginx/html; } } pid /run/nginx.pid; daemon off;",
"FROM registry.access.redhat.com/ubi8/ubi RUN yum -y install nginx && yum clean all COPY kickstart.ks /usr/share/nginx/html/ COPY repo /usr/share/nginx/html/ COPY nginx /etc/nginx.conf EXPOSE 8080 CMD [\"/usr/sbin/nginx\", \"-c\", \"/etc/nginx.conf\"] ARG commit ADD USD{commit} /usr/share/nginx/html/",
"lang en_US.UTF-8 keyboard us timezone Etc/UTC --isUtc text zerombr clearpart --all --initlabel autopart reboot user --name=core --group=wheel sshkey --username=core \"ssh-rsa AAAA3Nza... .\" ostreesetup --nogpg --osname=rhel --remote=edge --url=https://mirror.example.com/repo/ --ref=rhel/8/x86_64/edge %post cat << EOF > /etc/greenboot/check/required.d/check-dns.sh #!/bin/bash DNS_SERVER=USD(grep nameserver /etc/resolv.conf | cut -f2 -d\" \") COUNT=0 check DNS server is available ping -c1 USDDNS_SERVER while [ USD? != '0' ] && [ USDCOUNT -lt 10 ]; do COUNT++ echo \"Checking for DNS: Attempt USDCOUNT .\" sleep 10 ping -c 1 USDDNS_SERVER done EOF %end",
"podman build -t name-of-container-image --build-arg commit= uuid -commit.tar .",
"podman run --rm -d -p port :8080 localhost/ name-of-container-image",
"lang en_US.UTF-8 keyboard us timezone UTC zerombr clearpart --all --initlabel autopart --type=plain --fstype=xfs --nohome reboot text network --bootproto=dhcp user --name=core --groups=wheel --password=edge services --enabled=ostree-remount ostreesetup --nogpg --url=http://edge_device_ip:port/repo/ --osname=rhel --remote=edge --ref=rhel/9/x86_64/edge",
"virt-install --name rhel-edge-test-1 --memory 2048 --vcpus 2 --disk path=prepared_disk_image.qcow2,format=qcow2,size=8 --os-variant rhel8 --cdrom /home/username/Downloads/rhel-8-x86_64-boot.iso",
"inst.ks=http://web-server_device_ip:port/kickstart.ks",
"rpm-ostree status",
"State: idle Deployments: * ostree://edge:rhel/8/x86_64/edge Timestamp: 2020-09-18T20:06:54Z Commit: 836e637095554e0b634a0a48ea05c75280519dd6576a392635e6fa7d4d5e96",
"virt-install --name rhel-edge-test-1 --memory 2048 --vcpus 2 --disk path=prepared_disk_image.qcow2,format=qcow2,size=8 --os-variant rhel8 --cdrom /home/username/Downloads/rhel-8-x86_64-boot.iso",
"inst.ks=http://web-server_device_ip:port/kickstart.ks",
"rpm-ostree status",
"State: idle Deployments: * ostree://edge:rhel/8/x86_64/edge Timestamp: 2020-09-18T20:06:54Z Commit: 836e637095554e0b634a0a48ea05c75280519dd6576a392635e6fa7d4d5e96"
] | https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/8/html/composing_installing_and_managing_rhel_for_edge_images/installing-rpm-ostree-images_composing-installing-managing-rhel-for-edge-images |
22.2. IBM Redbooks Publications for IBM Z | 22.2. IBM Redbooks Publications for IBM Z Current versions of IBM Redbooks publications can be found at http://www.redbooks.ibm.com/ . They include: Introductory publications Introduction to the New Mainframe: z/VM Basics . IBM Redbooks . 2007. SG24-7316. Practical Migration to Linux on System z . IBM Redbooks . 2009. SG24-7727. Performance and high availability Linux on IBM System z: Performance Measurement and Tuning . IBM Redbooks . 2011. SG24-6926. Achieving High Availability on Linux for System z with Linux-HA Release 2 . IBM Redbooks . 2009. SG24-7711. Security Security for Linux on System z . IBM Redbooks . 2013. SG24-7728. Networking IBM System z Connectivity Handbook . IBM Redbooks . 2013. SG24-5444. OSA Express Implementation Guide . IBM Redbooks . 2009. SG24-5948. HiperSockets Implementation Guide . IBM Redbooks . 2007. SG24-6816. Fibre Channel Protocol for Linux and z/VM on IBM System z . IBM Redbooks . 2007. SG24-7266. | null | https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/7/html/installation_guide/sect-additional-references-ibm-redbooks-s390 |
Chapter 1. Support overview | Chapter 1. Support overview Red Hat offers cluster administrators tools for gathering data for your cluster, monitoring, and troubleshooting. 1.1. Get support Get support : Visit the Red Hat Customer Portal to review knowledge base articles, submit a support case, and review additional product documentation and resources. 1.2. Remote health monitoring issues Remote health monitoring issues : OpenShift Dedicated collects telemetry and configuration data about your cluster and reports it to Red Hat by using the Telemeter Client and the Insights Operator. Red Hat uses this data to understand and resolve issues in connected cluster . OpenShift Dedicated collects data and monitors health using the following: Telemetry : The Telemetry Client gathers and uploads the metrics values to Red Hat every four minutes and thirty seconds. Red Hat uses this data to: Monitor the clusters. Roll out OpenShift Dedicated upgrades. Improve the upgrade experience. Insight Operator : By default, OpenShift Dedicated installs and enables the Insight Operator, which reports configuration and component failure status every two hours. The Insight Operator helps to: Identify potential cluster issues proactively. Provide a solution and preventive action in Red Hat OpenShift Cluster Manager. You can review telemetry information . If you have enabled remote health reporting, Use Insights to identify issues . You can optionally disable remote health reporting. 1.3. Gather data about your cluster Gather data about your cluster : Red Hat recommends gathering your debugging information when opening a support case. This helps Red Hat Support to perform a root cause analysis. A cluster administrator can use the following to gather data about your cluster: The must-gather tool : Use the must-gather tool to collect information about your cluster and to debug the issues. sosreport : Use the sosreport tool to collect configuration details, system information, and diagnostic data for debugging purposes. Cluster ID : Obtain the unique identifier for your cluster, when providing information to Red Hat Support. Cluster node journal logs : Gather journald unit logs and logs within /var/log on individual cluster nodes to troubleshoot node-related issues. A network trace : Provide a network packet trace from a specific OpenShift Dedicated cluster node or a container to Red Hat Support to help troubleshoot network-related issues. 1.4. Troubleshooting issues A cluster administrator can monitor and troubleshoot the following OpenShift Dedicated component issues: Node issues : A cluster administrator can verify and troubleshoot node-related issues by reviewing the status, resource usage, and configuration of a node. You can query the following: Kubelet's status on a node. Cluster node journal logs. Operator issues : A cluster administrator can do the following to resolve Operator issues: Verify Operator subscription status. Check Operator pod health. Gather Operator logs. Pod issues : A cluster administrator can troubleshoot pod-related issues by reviewing the status of a pod and completing the following: Review pod and container logs. Start debug pods with root access. Source-to-image issues : A cluster administrator can observe the S2I stages to determine where in the S2I process a failure occurred. Gather the following to resolve Source-to-Image (S2I) issues: Source-to-Image diagnostic data. Application diagnostic data to investigate application failure. Storage issues : A multi-attach storage error occurs when the mounting volume on a new node is not possible because the failed node cannot unmount the attached volume. A cluster administrator can do the following to resolve multi-attach storage issues: Enable multiple attachments by using RWX volumes. Recover or delete the failed node when using an RWO volume. Monitoring issues : A cluster administrator can follow the procedures on the troubleshooting page for monitoring. If the metrics for your user-defined projects are unavailable or if Prometheus is consuming a lot of disk space, check the following: Investigate why user-defined metrics are unavailable. Determine why Prometheus is consuming a lot of disk space. OpenShift CLI ( oc ) issues : Investigate OpenShift CLI ( oc ) issues by increasing the log level. | null | https://docs.redhat.com/en/documentation/openshift_dedicated/4/html/support/support-overview |
23.4. CPU allocation | 23.4. CPU allocation <domain> ... <vcpu placement='static' cpuset="1-4,^3,6" current="1">2</vcpu> ... </domain> Figure 23.6. CPU Allocation The <vcpu> element defines the maximum number of virtual CPUs allocated for the guest virtual machine operating system, which must be between 1 and the maximum number supported by the hypervisor. This element can contain an optional cpuset attribute, which is a comma-separated list of physical CPU numbers that the domain process and virtual CPUs can be pinned to by default. Note that the pinning policy of the domain process and virtual CPUs can be specified separately by using the cputune attribute. If the emulatorpin attribute is specified in <cputune> , cpuset specified by <vcpu> will be ignored. Similarly, virtual CPUs that have set a value for vcpupin cause cpuset settings to be ignored. For virtual CPUs where vcpupin is not specified, it will be pinned to the physical CPUs specified by cpuset . Each element in the cpuset list is either a single CPU number, a range of CPU numbers, or a caret (^) followed by a CPU number to be excluded from a range. The attribute current can be used to specify whether fewer than the maximum number of virtual CPUs should be enabled. The placement optional attribute can be used to indicate the CPU placement mode for domain processes. The value of placement can be set as one of the following: static - pins the vCPU to the physical CPUs defined by the cpuset attribute. If cpuset is not defined, the domain processes will be pinned to all the available physical CPUs. auto - indicates the domain process will be pinned to the advisory nodeset from the querying numad, and the value of attribute cpuset will be ignored if it is specified. Note If the cpuset attribute is used along with placement , the value of placement defaults to the value of the <numatune> element (if it is used), or to static . | [
"<domain> <vcpu placement='static' cpuset=\"1-4,^3,6\" current=\"1\">2</vcpu> </domain>"
] | https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/7/html/virtualization_deployment_and_administration_guide/sect-Manipulating_the_domain_xml-CPU_allocation |
Chapter 3. Performing basic operations with the Block Storage service (cinder) | Chapter 3. Performing basic operations with the Block Storage service (cinder) Create and configure Block Storage volumes as the primary form of persistent storage for Compute instances in your overcloud. Create volumes, attach your volumes to instances, edit and resize your volumes, and modify volume ownership. 3.1. Creating Block Storage volumes Create volumes to provide persistent storage for instances that you launch with the Compute service (nova) in the overcloud. Important The default maximum number of volumes you can create for a project is 10. Prerequisites A successful undercloud installation. For more information, see Installing director on the undercloud . A successful overcloud deployment. For more information, see Creating a basic overcloud with CLI tools . Access to the Red Hat OpenStack Platform (RHOSP) Dashboard (horizon). For more information, see Overcloud deployment output . Procedure In the dashboard, select Project > Compute > Volumes . Click Create Volume , and edit the following fields: Field Description Volume name Name of the volume. Description Optional, short description of the volume. Type Optional volume type (see Section 2.3, "Group volume configuration with volume types" ). If you have multiple Block Storage back ends, you can use this to select a specific back end. See Section 2.10, "Specifying back ends for volume creation" . Size (GB) Volume size (in gigabytes). If you want to create an encrypted volume from an unencrypted image, you must ensure that the volume size is larger than the image size so that the encryption data does not truncate the volume data. Availability Zone Availability zones (logical server groups), along with host aggregates, are a common method for segregating resources within OpenStack. Availability zones are defined during installation. For more information about availability zones and host aggregates, see Creating and managing host aggregates in the Configuring the Compute Service for Instance Creation guide. Specify a Volume Source : Source Description No source, empty volume The volume is empty and does not contain a file system or partition table. Snapshot Use an existing snapshot as a volume source. If you select this option, a new Use snapshot as a source list opens; you can then choose a snapshot from the list. If you want to create a new volume from a snapshot of an encrypted volume, you must ensure that the new volume is at least 1GB larger than the old volume. For more information about volume snapshots, see Section 4.1, "Creating, using, and deleting volume snapshots" . Image Use an existing image as a volume source. If you select this option, a new Use snapshot as a source list opens; you can then choose an image from the list. Volume Use an existing volume as a volume source. If you select this option, a new Use snapshot as a source list opens; you can then choose a volume from the list. Click Create Volume . After the volume is created, its name appears in the Volumes table. You can also change the volume type later on. For more information, see Section 4.5, "Block Storage volume retyping" . 3.2. Editing a volume name or description Edit volume names and descriptions in the Dashboard (horizon). Prerequisites A successful undercloud installation. For more information, see Installing director on the undercloud . A successful overcloud deployment. For more information, see Creating a basic overcloud with CLI tools . Access to the Red Hat OpenStack Platform (RHOSP) Dashboard (horizon). For more information, see Overcloud deployment output . Procedure In the dashboard, select Project > Compute > Volumes . Select the volume's Edit Volume button. Edit the volume name or description as required. Click Edit Volume to save your changes. Note To create an encrypted volume, you must first have a volume type configured specifically for volume encryption. In addition, you must configure both Compute and Block Storage services to use the same static key. For information about how to set up the requirements for volume encryption, see Section 2.7, "Block Storage service (cinder) volume encryption" . 3.3. Resizing (extending) a Block Storage service volume Resize volumes to increase the storage capacity of the volumes. Note The ability to resize a volume in use is supported but is driver dependent. RBD is supported. You cannot extend in-use multi-attach volumes. For more information about support for this feature, contact Red Hat Support. Prerequisites A successful undercloud installation. For more information, see Installing director on the undercloud . Procedure List the volumes to retrieve the ID of the volume you want to extend: To resize the volume, run the following commands to specify the correct API microversion, then pass the volume ID and the new size (a value greater than the old one) as parameters: Replace <API microversion>, <volume ID>, and <size> with appropriate values. Use the following example as a guide: 3.4. Deleting a Block Storage service volume Use the Dashboard to delete volumes that you no longer require. Note A volume cannot be deleted if it has existing snapshots. For instructions on how to delete snapshots, see Section 4.1, "Creating, using, and deleting volume snapshots" . Prerequisites A successful undercloud installation. For more information, see Installing director on the undercloud . A successful overcloud deployment. For more information, see Creating a basic overcloud with CLI tools . Access to the Red Hat OpenStack Platform (RHOSP) Dashboard (horizon). For more information, see Overcloud deployment output . Procedure In the dashboard, select Project > Compute > Volumes . In the Volumes table, select the volume to delete. Click Delete Volumes . 3.5. Allocating volumes to multiple back ends If the Block Storage service is configured to use multiple back ends, you can use configured volume types to specify where a volume should be created. For more information, see Section 2.10, "Specifying back ends for volume creation" . The Block Storage service will automatically choose a back end if you do not specify one during volume creation. Block Storage sets the first defined back end as a default; this back end will be used until it runs out of space. At that point, Block Storage will set the second defined back end as a default, and so on. If this is not suitable for your needs, you can use the filter scheduler to control how Block Storage should select back ends. This scheduler can use different filters to triage suitable back ends, such as: AvailabilityZoneFilter Filters out all back ends that do not meet the availability zone requirements of the requested volume. CapacityFilter Selects only back ends with enough space to accommodate the volume. CapabilitiesFilter Selects only back ends that can support any specified settings in the volume. InstanceLocality Configures clusters to use volumes local to the same node. Prerequisites A successful undercloud installation. For more information, see the Director Installation and Usage guide. Procedure Add an environment file to your deployment command that contains the following parameters: 1 You can also add the ControllerExtraConfig: hook and its nested sections to the parameter_defaults: section of an existing environment file. 3.6. Attaching a volume to an instance Instances can use a volume for persistent storage. A volume can only be attached to one instance at a time. For more information about instances, see Image service in the Creating and Managing Images guide. Prerequisites A successful undercloud installation. For more information, see Installing director on the undercloud . A successful overcloud deployment. For more information, see Creating a basic overcloud with CLI tools . Access to the Red Hat OpenStack Platform (RHOSP) Dashboard (horizon). For more information, see Overcloud deployment output . Procedure In the dashboard, select Project > Compute > Volumes . Select the Edit Attachments action. If the volume is not attached to an instance, the Attach To Instance drop-down list is visible. From the Attach To Instance list, select the instance to which you want to attach the volume. Click Attach Volume . 3.7. Detaching a volume from an instance Instances can use a volume for persistent storage. A volume can only be attached to one instance at a time. For more information about instances, see Image service in the Creating and Managing Images guide. Prerequisites A successful undercloud installation. For more information, see Installing director on the undercloud . A successful overcloud deployment. For more information, see Creating a basic overcloud with CLI tools . Access to the Red Hat OpenStack Platform (RHOSP) Dashboard (horizon). For more information, see Overcloud deployment output . Procedure In the dashboard, select Project > Compute > Volumes . Select the volume's Manage Attachments action. If the volume is attached to an instance, the instance's name is displayed in the Attachments table. Click Detach Volume in this and the dialog screen. 3.8. Configuring the access rights to a volume The default state of a volume is read-write to allow data to be written to and read from it. You can mark a volume as read-only to protect its data from being accidentally overwritten or deleted. Note After changing a volume to be read-only you can change it back to read-write again. Prerequisites A successful undercloud installation. For more information, see Installing director on the undercloud . A successful overcloud deployment. For more information, see Creating a basic overcloud with CLI tools . Procedure If the volume is already attached to an instance, then detach this volume. For more information, see Section 3.7, "Detaching a volume from an instance" . Set the required access rights for this volume: To set the access rights of a volume to read-only: To set the access rights of a volume to read-write: If you detached this volume from an instance to change the access rights, then re-attach the volume. For more information, see Section 3.6, "Attaching a volume to an instance" . 3.9. Changing a volume owner with the CLI To change a volume's owner, you will have to perform a volume transfer. A volume transfer is initiated by the volume's owner, and the volume's change in ownership is complete after the transfer is accepted by the volume's new owner. Prerequisites A successful undercloud installation. For more information, see Installing director on the undercloud . A successful overcloud deployment. For more information, see Creating a basic overcloud with CLI tools . Procedure Log in as the volume's current owner. List the available volumes: Initiate the volume transfer: Where VOLUME is the name or ID of the volume you wish to transfer. For example, The cinder transfer-create command clears the ownership of the volume and creates an id and auth_key for the transfer. These values can be given to, and used by, another user to accept the transfer and become the new owner of the volume. The new user can now claim ownership of the volume. To do so, the user should first log in from the command line and run: Where TRANSFERID and TRANSFERKEY are the id and auth_key values returned by the cinder transfer-create command, respectively. For example, Note You can view all available volume transfers using: 3.10. Changing a volume owner with the Dashboard To change a volume's owner, you will have to perform a volume transfer. A volume transfer is initiated by the volume's owner, and the volume's change in ownership is complete after the transfer is accepted by the volume's new owner. Prerequisites A successful undercloud installation. For more information, see Installing director on the undercloud . A successful overcloud deployment. For more information, see Creating a basic overcloud with CLI tools . Access to the Red Hat OpenStack Platform (RHOSP) Dashboard (horizon). For more information, see Overcloud deployment output . Procedure As the volume owner in the dashboard, select Projects > Volumes . In the Actions column of the volume to transfer, select Create Transfer . In the Create Transfer dialog box, enter a name for the transfer and click Create Volume Transfer . The volume transfer is created, and in the Volume Transfer screen you can capture the transfer ID and the authorization key to send to the recipient project. Click the Download transfer credentials button to download a .txt file containing the transfer name , transfer ID , and authorization key . Note The authorization key is available only in the Volume Transfer screen. If you lose the authorization key, you must cancel the transfer and create another transfer to generate a new authorization key. Close the Volume Transfer screen to return to the volume list. The volume status changes to awaiting-transfer until the recipient project accepts the transfer Accept a volume transfer from the dashboard As the recipient project owner in the dashboard, select Projects > Volumes . Click Accept Transfer . In the Accept Volume Transfer dialog box, enter the transfer ID and the authorization key that you received from the volume owner and click Accept Volume Transfer . The volume now appears in the volume list for the active project. | [
"cinder list",
"OS_VOLUME_API_VERSION=<API microversion> cinder extend <volume ID> <size>",
"OS_VOLUME_API_VERSION=3.42 cinder extend 573e024d-5235-49ce-8332-be1576d323f8 10",
"parameter_defaults: ControllerExtraConfig: # 1 cinder::config::cinder_config: DEFAULT/scheduler_default_filters: value: 'AvailabilityZoneFilter,CapacityFilter,CapabilitiesFilter,InstanceLocality'",
"cinder readonly-mode-update <VOLUME-ID> true",
"cinder readonly-mode-update <VOLUME-ID> false",
"cinder list",
"cinder transfer-create VOLUME",
"+------------+--------------------------------------+ | Property | Value | +------------+--------------------------------------+ | auth_key | f03bf51ce7ead189 | | created_at | 2014-12-08T03:46:31.884066 | | id | 3f5dc551-c675-4205-a13a-d30f88527490 | | name | None | | volume_id | bcf7d015-4843-464c-880d-7376851ca728 | +------------+--------------------------------------+",
"cinder transfer-accept TRANSFERID TRANSFERKEY",
"cinder transfer-accept 3f5dc551-c675-4205-a13a-d30f88527490 f03bf51ce7ead189",
"cinder transfer-list"
] | https://docs.redhat.com/en/documentation/red_hat_openstack_platform/16.2/html/storage_guide/assembly-performing-basic-operations-with-block-storage_osp-storage-guide |
Chapter 2. ContainerRuntimeConfig [machineconfiguration.openshift.io/v1] | Chapter 2. ContainerRuntimeConfig [machineconfiguration.openshift.io/v1] Description ContainerRuntimeConfig describes a customized Container Runtime configuration. Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). Type object Required spec 2.1. Specification Property Type Description apiVersion string APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources kind string Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds metadata ObjectMeta Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata spec object ContainerRuntimeConfigSpec defines the desired state of ContainerRuntimeConfig status object ContainerRuntimeConfigStatus defines the observed state of a ContainerRuntimeConfig 2.1.1. .spec Description ContainerRuntimeConfigSpec defines the desired state of ContainerRuntimeConfig Type object Required containerRuntimeConfig Property Type Description containerRuntimeConfig object ContainerRuntimeConfiguration defines the tuneables of the container runtime machineConfigPoolSelector object MachineConfigPoolSelector selects which pools the ContainerRuntimeConfig shoud apply to. A nil selector will result in no pools being selected. 2.1.2. .spec.containerRuntimeConfig Description ContainerRuntimeConfiguration defines the tuneables of the container runtime Type object Property Type Description defaultRuntime string defaultRuntime is the name of the OCI runtime to be used as the default. logLevel string logLevel specifies the verbosity of the logs based on the level it is set to. Options are fatal, panic, error, warn, info, and debug. logSizeMax integer-or-string logSizeMax specifies the Maximum size allowed for the container log file. Negative numbers indicate that no size limit is imposed. If it is positive, it must be >= 8192 to match/exceed conmon's read buffer. overlaySize integer-or-string overlaySize specifies the maximum size of a container image. This flag can be used to set quota on the size of container images. (default: 10GB) pidsLimit integer pidsLimit specifies the maximum number of processes allowed in a container 2.1.3. .spec.machineConfigPoolSelector Description MachineConfigPoolSelector selects which pools the ContainerRuntimeConfig shoud apply to. A nil selector will result in no pools being selected. Type object Property Type Description matchExpressions array matchExpressions is a list of label selector requirements. The requirements are ANDed. matchExpressions[] object A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. matchLabels object (string) matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 2.1.4. .spec.machineConfigPoolSelector.matchExpressions Description matchExpressions is a list of label selector requirements. The requirements are ANDed. Type array 2.1.5. .spec.machineConfigPoolSelector.matchExpressions[] Description A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. Type object Required key operator Property Type Description key string key is the label key that the selector applies to. operator string operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. values array (string) values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 2.1.6. .status Description ContainerRuntimeConfigStatus defines the observed state of a ContainerRuntimeConfig Type object Property Type Description conditions array conditions represents the latest available observations of current state. conditions[] object ContainerRuntimeConfigCondition defines the state of the ContainerRuntimeConfig observedGeneration integer observedGeneration represents the generation observed by the controller. 2.1.7. .status.conditions Description conditions represents the latest available observations of current state. Type array 2.1.8. .status.conditions[] Description ContainerRuntimeConfigCondition defines the state of the ContainerRuntimeConfig Type object Property Type Description lastTransitionTime `` lastTransitionTime is the time of the last update to the current status object. message string message provides additional information about the current condition. This is only to be consumed by humans. reason string reason is the reason for the condition's last transition. Reasons are PascalCase status string status of the condition, one of True, False, Unknown. type string type specifies the state of the operator's reconciliation functionality. 2.2. API endpoints The following API endpoints are available: /apis/machineconfiguration.openshift.io/v1/containerruntimeconfigs DELETE : delete collection of ContainerRuntimeConfig GET : list objects of kind ContainerRuntimeConfig POST : create a ContainerRuntimeConfig /apis/machineconfiguration.openshift.io/v1/containerruntimeconfigs/{name} DELETE : delete a ContainerRuntimeConfig GET : read the specified ContainerRuntimeConfig PATCH : partially update the specified ContainerRuntimeConfig PUT : replace the specified ContainerRuntimeConfig /apis/machineconfiguration.openshift.io/v1/containerruntimeconfigs/{name}/status GET : read status of the specified ContainerRuntimeConfig PATCH : partially update status of the specified ContainerRuntimeConfig PUT : replace status of the specified ContainerRuntimeConfig 2.2.1. /apis/machineconfiguration.openshift.io/v1/containerruntimeconfigs HTTP method DELETE Description delete collection of ContainerRuntimeConfig Table 2.1. HTTP responses HTTP code Reponse body 200 - OK Status schema 401 - Unauthorized Empty HTTP method GET Description list objects of kind ContainerRuntimeConfig Table 2.2. HTTP responses HTTP code Reponse body 200 - OK ContainerRuntimeConfigList schema 401 - Unauthorized Empty HTTP method POST Description create a ContainerRuntimeConfig Table 2.3. Query parameters Parameter Type Description dryRun string When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed fieldValidation string fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. Table 2.4. Body parameters Parameter Type Description body ContainerRuntimeConfig schema Table 2.5. HTTP responses HTTP code Reponse body 200 - OK ContainerRuntimeConfig schema 201 - Created ContainerRuntimeConfig schema 202 - Accepted ContainerRuntimeConfig schema 401 - Unauthorized Empty 2.2.2. /apis/machineconfiguration.openshift.io/v1/containerruntimeconfigs/{name} Table 2.6. Global path parameters Parameter Type Description name string name of the ContainerRuntimeConfig HTTP method DELETE Description delete a ContainerRuntimeConfig Table 2.7. Query parameters Parameter Type Description dryRun string When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed Table 2.8. HTTP responses HTTP code Reponse body 200 - OK Status schema 202 - Accepted Status schema 401 - Unauthorized Empty HTTP method GET Description read the specified ContainerRuntimeConfig Table 2.9. HTTP responses HTTP code Reponse body 200 - OK ContainerRuntimeConfig schema 401 - Unauthorized Empty HTTP method PATCH Description partially update the specified ContainerRuntimeConfig Table 2.10. Query parameters Parameter Type Description dryRun string When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed fieldValidation string fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. Table 2.11. HTTP responses HTTP code Reponse body 200 - OK ContainerRuntimeConfig schema 401 - Unauthorized Empty HTTP method PUT Description replace the specified ContainerRuntimeConfig Table 2.12. Query parameters Parameter Type Description dryRun string When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed fieldValidation string fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. Table 2.13. Body parameters Parameter Type Description body ContainerRuntimeConfig schema Table 2.14. HTTP responses HTTP code Reponse body 200 - OK ContainerRuntimeConfig schema 201 - Created ContainerRuntimeConfig schema 401 - Unauthorized Empty 2.2.3. /apis/machineconfiguration.openshift.io/v1/containerruntimeconfigs/{name}/status Table 2.15. Global path parameters Parameter Type Description name string name of the ContainerRuntimeConfig HTTP method GET Description read status of the specified ContainerRuntimeConfig Table 2.16. HTTP responses HTTP code Reponse body 200 - OK ContainerRuntimeConfig schema 401 - Unauthorized Empty HTTP method PATCH Description partially update status of the specified ContainerRuntimeConfig Table 2.17. Query parameters Parameter Type Description dryRun string When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed fieldValidation string fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. Table 2.18. HTTP responses HTTP code Reponse body 200 - OK ContainerRuntimeConfig schema 401 - Unauthorized Empty HTTP method PUT Description replace status of the specified ContainerRuntimeConfig Table 2.19. Query parameters Parameter Type Description dryRun string When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed fieldValidation string fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. Table 2.20. Body parameters Parameter Type Description body ContainerRuntimeConfig schema Table 2.21. HTTP responses HTTP code Reponse body 200 - OK ContainerRuntimeConfig schema 201 - Created ContainerRuntimeConfig schema 401 - Unauthorized Empty | null | https://docs.redhat.com/en/documentation/openshift_container_platform/4.15/html/machine_apis/containerruntimeconfig-machineconfiguration-openshift-io-v1 |
Chapter 1. Node APIs | Chapter 1. Node APIs 1.1. Node [v1] Description Node is a worker node in Kubernetes. Each node will have a unique identifier in the cache (i.e. in etcd). Type object 1.2. PerformanceProfile [performance.openshift.io/v2] Description PerformanceProfile is the Schema for the performanceprofiles API Type object 1.3. Profile [tuned.openshift.io/v1] Description Profile is a specification for a Profile resource. Type object 1.4. RuntimeClass [node.k8s.io/v1] Description RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://kubernetes.io/docs/concepts/containers/runtime-class/ Type object 1.5. Tuned [tuned.openshift.io/v1] Description Tuned is a collection of rules that allows cluster-wide deployment of node-level sysctls and more flexibility to add custom tuning specified by user needs. These rules are translated and passed to all containerized Tuned daemons running in the cluster in the format that the daemons understand. The responsibility for applying the node-level tuning then lies with the containerized Tuned daemons. More info: https://github.com/openshift/cluster-node-tuning-operator Type object | null | https://docs.redhat.com/en/documentation/openshift_container_platform/4.12/html/node_apis/node-apis |
Preface | Preface This guide provides recommended practices for various processes needed to install, configure, and maintain Ansible Automation Platform on Red Hat Enterprise Linux in a secure manner. | null | https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.4/html/red_hat_ansible_automation_platform_hardening_guide/pr01 |
Chapter 1. Introducing Software Collections | Chapter 1. Introducing Software Collections This chapter introduces you to the concept and usage of Software Collections or SCLs for short. 1.1. Why Package Software with RPM? The RPM Package Manager (RPM) is a package management system that runs on Red Hat Enterprise Linux. RPM makes it easier for you to distribute, manage, and update software that you create for Red Hat Enterprise Linux. Many software vendors distribute their software via a conventional archive file (such as a tarball). However, there are several advantages in packaging software into RPM packages. These advantages are outlined below. With RPM, you can: Install, reinstall, remove, upgrade and verify packages. Users can use standard package management tools (for example Yum or PackageKit ) to install, reinstall, remove, upgrade and verify your RPM packages. Use a database of installed packages to query and verify packages. Because RPM maintains a database of installed packages and their files, users can easily query and verify packages on their system. Use metadata to describe packages, their installation instructions, and so on. Each RPM package includes metadata that describes the package's components, version, release, size, project URL, installation instructions, and so on. Package pristine software sources into source and binary packages. RPM allows you to take pristine software sources and package them into source and binary packages for your users. In source packages, you have the pristine sources along with any patches that were used, plus complete build instructions. This design eases the maintenance of the packages as new versions of your software are released. Add packages to Yum repositories. You can add your package to a Yum repository that enables clients to easily find and deploy your software. Digitally sign your packages. Using a GPG signing key, you can digitally sign your package so that users are able to verify the authenticity of the package. For in-depth information on what is RPM and how to use it, see the Red Hat Enterprise Linux 7 System Administrator's Guide , or the Red Hat Enterprise Linux 6 Deployment Guide . | null | https://docs.redhat.com/en/documentation/red_hat_software_collections/3/html/packaging_guide/chap-introducing_software_collections |
Chapter 1. Introduction to RHOSO certification program | Chapter 1. Introduction to RHOSO certification program Certify your infrastructure and applications deployed on Red Hat OpenStack Services on OpenShift (RHOSO). 1.1. The Red Hat certification program overview The Red Hat certification program ensures the compatibility of your applications deployed on Red Hat OpenStack Services on OpenShift (RHOSO). The program has three main elements: Test suite - Contains tests for hardware or software applications undergoing certification. Red Hat Certification Ecosystem - Helps to explore and find certified products including hardware, software, cloud, and service providers. Support - Provides a joint support relationship between you and Red Hat. This table summarizes the basic differences between a product listing and components: Product listing Component (Project) Includes detailed information about your product. The individual containers, operators, helm charts, and infrastructure services that you test, certify, and then add to the product listing. Products are composed of one or more components. Components are added to a product listing. You add components to a product for proceeding with certification. A component can be used in multiple products by adding it to each product listing. A product can not be published without certified components. Certified components are published as part of a product listing. 1.2. The RHOSO certification workflow Note Red Hat recommends that you are a Red Hat Certified Engineer or hold equivalent experience before starting the certification process. The following diagram gives an overview of the certification process. Figure 1.1. The RHOSO certification workflow 1.3. Getting support and giving feedback For any questions related to the Red Hat certification toolset, certification process, or procedure described in this documentation, refer to the KB Articles , Red Hat Customer Portal , and Red Hat Partner Connect . You can also open a support case to get support or submit feedback. To open a support case see, How do I open and manage a support case on the Customer Portal? Questions during certification If you have any questions about a specific certification, record them in the Comments section of the Dialog Tab of the certification entry. Warning If you face any preliminary issues that stop you from proceeding with the product certification, contact your Engineering Partner Manager or other engineering engagements to resolve them. | null | https://docs.redhat.com/en/documentation/red_hat_software_certification/2025/html/red_hat_openstack_services_on_openshift_certification_workflow_guide/introduction-to-red-hat-openstack-services-on-openshift-rhoso-certification-program_rhoso-workflow-guide |
Chapter 2. Installing the Integration Test Suite (tempest) | Chapter 2. Installing the Integration Test Suite (tempest) You can install the Integration Test Suite either with director or with a manual installation. To install the Integration Test Suite with director, see Installing the Integration Test Suite with director . To manually install the Integration Test Suite, Installing the Integration Test Suite manually . 2.1. Prerequisites An undercloud installation. For more information, see Installing the undercloud . An overcloud deployment. For more information, see Creating a basic overcloud with CLI tools . 2.2. Installing the Integration Test Suite with director Use the Red Hat OpenStack Platform (RHOSP) director to install the test suite automatically. Prerequisites You have installed the python3-tripleoclient packages. For more information, see Installing director packages in the Director Installation and Usage guide. Procedure Log in to the undercloud host as the stack user. Edit the undercloud.conf file located in the home directory of the stack user. Set the enable_tempest parameter to true . Run the openstack undercloud install command to include the extra configuration in the undercloud: 2.3. Installing the Integration Test Suite manually If you do not want to install the Integration Test Suite (tempest) automatically with director, you can perform the installation manually later. You must ensure that you have a basic network configuration, install the Integration Test Suite packages, and create a configuration file that contains details about your OpenStack services and other testing behaviour switches. Procedure Ensure that the following networks are available within your Red Hat OpenStack Platform (RHOSP) environment: An external network that can provide a floating IP. A private network. Connect these networks through a router. To create the private network, specify the following options according to your network deployment: To create the public network, specify the following options according to your network deployment: Install the packages related to the Integration Test Suite: This command does not install any tempest plugins. You must install the plugins manually, depending on your RHOSP installation. Install the appropriate tempest plugin for each component in your environment. For example, enter the following command to install the keystone, neutron, cinder, and telemetry plugins: For a full list of packages, see Integration Test Suite packages . Note You can also install the openstack-tempest-all package. This package contains all of the tempest plugins. 2.3.1. Integration Test Suite packages Use dnf search to retrieve a list of tempest test packages: Component Package Name barbican python3-barbican-tests-tempest cinder python3-cinder-tests-tempest designate python3-designate-tests-tempest ec2-api python3-ec2api-tests-tempest heat python3-heat-tests-tempest ironic python3-ironic-tests-tempest keystone python3-keystone-tests-tempest kuryr python3-kuryr-tests-tempest manila python3-manila-tests-tempest mistral python3-mistral-tests-tempest networking-bgvpn python3-networking-bgpvpn-tests-tempest networking-l2gw python3-networking-l2gw-tests-tempest neutron python3-neutron-tests-tempest nova-join python3-novajoin-tests-tempest octavia python3-octavia-tests-tempest patrole python3-patrole-tests-tempest telemetry python3-telemetry-tests-tempest tripleo-common python3-tripleo-common-tests-tempest zaqar python3-zaqar-tests-tempest Note The python3-telemetry-tests-tempest package contains plugins for aodh, panko, gnocchi, and ceilometer tests. The python3-ironic-tests-tempest package contains plugins for ironic and ironic-inspector. | [
"enable_tempest = true",
"openstack undercloud install",
"openstack network create <network_name> --share openstack subnet create <subnet_name> --subnet-range <address/prefix> --network <network_name> openstack router create <router_name> openstack router add subnet <router_name> <subnet_name>",
"openstack network create <network_name> --external --provider-network-type flat --provider-physical-network datacentre openstack subnet create <subnet_name> --subnet-range <address/prefix> --gateway <default_gateway> --no-dhcp --network <network_name> openstack router set <router_name> --external-gateway <public_network_name>",
"sudo dnf -y install openstack-tempest",
"sudo dnf install python3-keystone-tests-tempest python3-neutron-tests-tempest python3-cinder-tests-tempest python3-telemetry-tests-tempest",
"sudo dnf search USD(openstack service list -c Name -f value) 2>/dev/null | grep test | awk '{print USD1}'"
] | https://docs.redhat.com/en/documentation/red_hat_openstack_platform/16.2/html/openstack_integration_test_suite_guide/assembly_installing-the-integration-test-suite-tempest_tempest |
B.38.11. RHBA-2011:1412 - kernel bug fix update | B.38.11. RHBA-2011:1412 - kernel bug fix update Updated kernel packages that fix one bug are now available for Red Hat Enterprise Linux 6 Extended Update Support. The kernel packages contain the Linux kernel, the core of any Linux operating system. Bug Fix BZ# 695256 While executing a multi-threaded process by multiple CPUs, page-directory-pointer-table entry (PDPTE) registers were not fully flushed from the CPU cache when a Page Global Directory (PGD) entry was changed in x86 Physical Address Extension (PAE) mode. As a consequence, the process failed to respond for a long time before it successfully finished. With this update, the kernel has been modified to flush the Translation Lookaside Buffer (TLB) for each CPU using a page table that has changed. Multi-threaded processes now finish without hanging. All users of kernel are advised to upgrade to these updated packages, which fix this bug. The system must be rebooted for this update to take effect. | null | https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/6.0_technical_notes/rhba-2011-1412 |
Chapter 7. Fixed issues | Chapter 7. Fixed issues ENTMQIC-2752 - Some details missing on dispositions forwarded by router on transacted sessions Previously, as messages moved from a client through a router to a broker in a transacted session, the disposition from the broker was not forwarded from the router back to the client. This issue has been resolved. ENTMQIC-2624 - Provide example yaml files for installation of operator and container image Previously, if you wanted to use the AMQ Interconnect Operator, you had to provision it using the OperatorHub. The release includes files and instructions to provision the operator from the command-line. ENTMQIC-2427 - In qpid-dispatch-console the addresses page is not showing the ingress and egress counts properly Previously, the AMQ Management Console might incorrectly display 0 for delivery attribute values associated with mobile addresses. This issue has been resolved. | null | https://docs.redhat.com/en/documentation/red_hat_amq/2021.q1/html/release_notes_for_amq_interconnect_1.10/fixed_issues |
Operator APIs | Operator APIs OpenShift Container Platform 4.15 Reference guide for Operator APIs Red Hat OpenShift Documentation Team | null | https://docs.redhat.com/en/documentation/openshift_container_platform/4.15/html/operator_apis/index |
22.8. User Certificates | 22.8. User Certificates For information on user certificates, see Chapter 24, Managing Certificates for Users, Hosts, and Services . | null | https://docs.redhat.com/en/documentation/Red_Hat_Enterprise_Linux/7/html/linux_domain_identity_authentication_and_policy_guide/user-certificates-management |
Chapter 12. Supported integration with Red Hat products | Chapter 12. Supported integration with Red Hat products Streams for Apache Kafka 2.7 supports integration with the following Red Hat products: Red Hat build of Keycloak Provides OAuth 2.0 authentication and OAuth 2.0 authorization. Red Hat 3scale API Management Secures the Kafka Bridge and provides additional API management features. Red Hat build of Debezium Monitors databases and creates event streams. Red Hat build of Apicurio Registry Provides a centralized store of service schemas for data streaming. Red Hat build of Apache Camel K Provides a lightweight integration framework. For information on the functionality these products can introduce to your Streams for Apache Kafka deployment, refer to the product documentation. 12.1. Red Hat build of Keycloak (formerly Red Hat Single Sign-On) Streams for Apache Kafka supports OAuth 2.0 token-based authorization through Red Hat build of Keycloak Authorization Services , providing centralized management of security policies and permissions. Note Red Hat build of Keycloak replaces Red Hat Single Sign-On, which is now in maintenance support. We are working on updating our documentation, resources, and media to reflect this transition. In the interim, content that describes using Single Sign-On in the Streams for Apache Kafka documentation also applies to using the Red Hat build of Keycloak. 12.2. Red Hat 3scale API Management If you deployed the Kafka Bridge on OpenShift Container Platform, you can use it with 3scale. 3scale API Management can secure the Kafka Bridge with TLS, and provide authentication and authorization. Integration with 3scale also means that additional features like metrics, rate limiting and billing are available. For information on deploying 3scale, see Using 3scale API Management with the Streams for Apache Kafka Bridge . 12.3. Red Hat build of Debezium for change data capture The Red Hat build of Debezium is a distributed change data capture platform. It captures row-level changes in databases, creates change event records, and streams the records to Kafka topics. Debezium is built on Apache Kafka. You can deploy and integrate the Red Hat build of Debezium with Streams for Apache Kafka. Following a deployment of Streams for Apache Kafka, you deploy Debezium as a connector configuration through Kafka Connect. Debezium passes change event records to Streams for Apache Kafka on OpenShift. Applications can read these change event streams and access the change events in the order in which they occurred. For more information on deploying Debezium with Streams for Apache Kafka, refer to the product documentation for the Red Hat build of Debezium . 12.4. Red Hat build of Apicurio Registry for schema validation You can use the Red Hat build of Apicurio Registry as a centralized store of service schemas for data streaming. For Kafka, you can use the Red Hat build of Apicurio Registry to store Apache Avro or JSON schema. Apicurio Registry provides a REST API and a Java REST client to register and query the schemas from client applications through server-side endpoints. Using Apicurio Registry decouples the process of managing schemas from the configuration of client applications. You enable an application to use a schema from the registry by specifying its URL in the client code. For example, the schemas to serialize and deserialize messages can be stored in the registry, which are then referenced from the applications that use them to ensure that the messages that they send and receive are compatible with those schemas. Kafka client applications can push or pull their schemas from Apicurio Registry at runtime. For more information on using the Red Hat build of Apicurio Registry with Streams for Apache Kafka, refer to the product documentation for the Red Hat build of Apicurio Registry . 12.5. Red Hat build of Apache Camel K The Red Hat build of Apache Camel K is a lightweight integration framework built from Apache Camel K that runs natively in the cloud on OpenShift. Camel K supports serverless integration, which allows for development and deployment of integration tasks without the need to manage the underlying infrastructure. You can use Camel K to build and integrate event-driven applications with your Streams for Apache Kafka environment. For scenarios requiring real-time data synchronization between different systems or databases, Camel K can be used to capture and transform change in events and send them to Streams for Apache Kafka for distribution to other systems. For more information on using the Camel K with Streams for Apache Kafka, refer to the product documentation for the Red Hat build of Apache Camel K . Additional resources Red Hat build of Keycloak Supported Configurations Red Hat Single Sign-On Supported Configurations (maintenance) Red Hat 3scale API Management Supported Configurations Red Hat build of Debezium Supported Configurations Red Hat build of Apicurio Registry Supported Configurations Red Hat build of Apache Camel K Supported Configurations Revised on 2024-11-19 15:44:47 UTC | null | https://docs.redhat.com/en/documentation/red_hat_streams_for_apache_kafka/2.7/html/release_notes_for_streams_for_apache_kafka_2.7_on_openshift/supported-config-str |
Chapter 18. Using Kafka Exporter | Chapter 18. Using Kafka Exporter Kafka Exporter is an open source project to enhance monitoring of Apache Kafka brokers and clients. Kafka Exporter is provided with AMQ Streams for deployment with a Kafka cluster to extract additional metrics data from Kafka brokers related to offsets, consumer groups, consumer lag, and topics. The metrics data is used, for example, to help identify slow consumers. Lag data is exposed as Prometheus metrics, which can then be presented in Grafana for analysis. If you are already using Prometheus and Grafana for monitoring of built-in Kafka metrics, you can configure Prometheus to also scrape the Kafka Exporter Prometheus endpoint. Kafka exposes metrics through JMX, which can then be exported as Prometheus metrics. For more information, see Monitoring your cluster using JMX . 18.1. Consumer lag Consumer lag indicates the difference in the rate of production and consumption of messages. Specifically, consumer lag for a given consumer group indicates the delay between the last message in the partition and the message being currently picked up by that consumer. The lag reflects the position of the consumer offset in relation to the end of the partition log. This difference is sometimes referred to as the delta between the producer offset and consumer offset, the read and write positions in the Kafka broker topic partitions. Suppose a topic streams 100 messages a second. A lag of 1000 messages between the producer offset (the topic partition head) and the last offset the consumer has read means a 10-second delay. The importance of monitoring consumer lag For applications that rely on the processing of (near) real-time data, it is critical to monitor consumer lag to check that it does not become too big. The greater the lag becomes, the further the process moves from the real-time processing objective. Consumer lag, for example, might be a result of consuming too much old data that has not been purged, or through unplanned shutdowns. Reducing consumer lag Typical actions to reduce lag include: Scaling-up consumer groups by adding new consumers Increasing the retention time for a message to remain in a topic Adding more disk capacity to increase the message buffer Actions to reduce consumer lag depend on the underlying infrastructure and the use cases AMQ Streams is supporting. For instance, a lagging consumer is less likely to benefit from the broker being able to service a fetch request from its disk cache. And in certain cases, it might be acceptable to automatically drop messages until a consumer has caught up. 18.2. Kafka Exporter alerting rule examples The sample alert notification rules specific to Kafka Exporter are as follows: UnderReplicatedPartition An alert to warn that a topic is under-replicated and the broker is not replicating enough partitions. The default configuration is for an alert if there are one or more under-replicated partitions for a topic. The alert might signify that a Kafka instance is down or the Kafka cluster is overloaded. A planned restart of the Kafka broker may be required to restart the replication process. TooLargeConsumerGroupLag An alert to warn that the lag on a consumer group is too large for a specific topic partition. The default configuration is 1000 records. A large lag might indicate that consumers are too slow and are falling behind the producers. NoMessageForTooLong An alert to warn that a topic has not received messages for a period of time. The default configuration for the time period is 10 minutes. The delay might be a result of a configuration issue preventing a producer from publishing messages to the topic. You can adapt alerting rules according to your specific needs. Additional resources For more information about setting up alerting rules, see Configuration in the Prometheus documentation. 18.3. Kafka Exporter metrics Lag information is exposed by Kafka Exporter as Prometheus metrics for presentation in Grafana. Kafka Exporter exposes metrics data for brokers, topics, and consumer groups. Table 18.1. Broker metrics output Name Information kafka_brokers Number of brokers in the Kafka cluster Table 18.2. Topic metrics output Name Information kafka_topic_partitions Number of partitions for a topic kafka_topic_partition_current_offset Current topic partition offset for a broker kafka_topic_partition_oldest_offset Oldest topic partition offset for a broker kafka_topic_partition_in_sync_replica Number of in-sync replicas for a topic partition kafka_topic_partition_leader Leader broker ID of a topic partition kafka_topic_partition_leader_is_preferred Shows 1 if a topic partition is using the preferred broker kafka_topic_partition_replicas Number of replicas for this topic partition kafka_topic_partition_under_replicated_partition Shows 1 if a topic partition is under-replicated Table 18.3. Consumer group metrics output Name Information kafka_consumergroup_current_offset Current topic partition offset for a consumer group kafka_consumergroup_lag Current approximate lag for a consumer group at a topic partition 18.4. Running Kafka Exporter Run Kafka Exporter to expose Prometheus metrics for presentation in a Grafana dashboard. Download and install the Kafka Exporter package to use the Kafka Exporter with AMQ Streams. You need an AMQ Streams subscription to be able to download and install the package. Prerequisites AMQ Streams is installed on the host You have a subscription to AMQ Streams This procedure assumes you already have access to a Grafana user interface and Prometheus is deployed and added as a data source. Procedure Install the Kafka Exporter package: dnf install kafka_exporter Verify the package has installed: dnf info kafka_exporter Run the Kafka Exporter using appropriate configuration parameter values: kafka_exporter --kafka.server= <kafka_bootstrap_address> :9092 --kafka.version=3.5.0 -- <my_other_parameters> The parameters require a double-hyphen convention, such as --kafka.server . Table 18.4. Kafka Exporter configuration parameters Option Description Default kafka.server Host/post address of the Kafka server. kafka:9092 kafka.version Kafka broker version. 1.0.0 group.filter A regular expression to specify the consumer groups to include in the metrics. .* (all) topic.filter A regular expression to specify the topics to include in the metrics. .* (all) sasl.< parameter > Parameters to enable and connect to the Kafka cluster using SASL/PLAIN authentication, with user name and password. false tls.< parameter > Parameters to enable connect to the Kafka cluster using TLS authentication, with optional certificate and key. false web.listen-address Port address to expose the metrics. :9308 web.telemetry-path Path for the exposed metrics. /metrics log.level Logging configuration, to log messages with a given severity (debug, info, warn, error, fatal) or above. info log.enable-sarama Boolean to enable Sarama logging, a Go client library used by the Kafka Exporter. false legacy.partitions Boolean to enable metrics to be fetched from inactive topic partitions as well as from active partitions. If you want Kafka Exporter to return metrics for inactive partitions, set to true . false You can use kafka_exporter --help for information on the properties. Configure Prometheus to monitor the Kafka Exporter metrics. For more information on configuring Prometheus, see the Prometheus documentation . Enable Grafana to present the Kafka Exporter metrics data exposed by Prometheus. For more information, see Presenting Kafka Exporter metrics in Grafana . Updating Kafka Exporter Use the latest version of Kafka Exporter with your AMQ Streams installation. To check for updates, use: dnf check-update To update Kafka Exporter, use: dnf update kafka_exporter 18.5. Presenting Kafka Exporter metrics in Grafana Using Kafka Exporter Prometheus metrics as a data source, you can create a dashboard of Grafana charts. For example, from the metrics you can create the following Grafana charts: Message in per second (from topics) Message in per minute (from topics) Lag by consumer group Messages consumed per minute (by consumer groups) When metrics data has been collected for some time, the Kafka Exporter charts are populated. Use the Grafana charts to analyze lag and to check if actions to reduce lag are having an impact on an affected consumer group. If, for example, Kafka brokers are adjusted to reduce lag, the dashboard will show the Lag by consumer group chart going down and the Messages consumed per minute chart going up. Additional resources Example dashboard for Kafka Exporter Grafana documentation | [
"dnf install kafka_exporter",
"dnf info kafka_exporter",
"kafka_exporter --kafka.server= <kafka_bootstrap_address> :9092 --kafka.version=3.5.0 -- <my_other_parameters>",
"dnf check-update",
"dnf update kafka_exporter"
] | https://docs.redhat.com/en/documentation/red_hat_streams_for_apache_kafka/2.5/html/using_amq_streams_on_rhel/assembly-kafka-exporter-str |
2.3.3. Inattentive Administration | 2.3.3. Inattentive Administration Administrators who fail to patch their systems are one of the greatest threats to server security. According to the System Administration Network and Security Institute ( SANS ), the primary cause of computer security vulnerability is to "assign untrained people to maintain security and provide neither the training nor the time to make it possible to do the job." [4] This applies as much to inexperienced administrators as it does to overconfident or amotivated administrators. Some administrators fail to patch their servers and workstations, while others fail to watch log messages from the system kernel or network traffic. Another common error is when default passwords or keys to services are left unchanged. For example, some databases have default administration passwords because the database developers assume that the system administrator changes these passwords immediately after installation. If a database administrator fails to change this password, even an inexperienced cracker can use a widely-known default password to gain administrative privileges to the database. These are only a few examples of how inattentive administration can lead to compromised servers. [4] Source: https://www.sans.org/reading_room/whitepapers/hsoffice/addressing_and_i mplementing_computer_security_for_a_small_branch_office_620 | null | https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/4/html/security_guide/s2-risk-serv-lazyadmin |
Using single sign-on with JBoss EAP | Using single sign-on with JBoss EAP Red Hat JBoss Enterprise Application Platform 8.0 Guide to using single sign-on to add authentication to applications deployed on JBoss EAP Red Hat Customer Content Services | null | https://docs.redhat.com/en/documentation/red_hat_jboss_enterprise_application_platform/8.0/html/using_single_sign-on_with_jboss_eap/index |
4.6. Configuring the Default Gateway | 4.6. Configuring the Default Gateway The default gateway is determined by the network scripts which parse the /etc/sysconfig/network file first and then the network interface ifcfg files for interfaces that are " up " . The ifcfg files are parsed in numerically ascending order, and the last GATEWAY directive to be read is used to compose a default route in the routing table. You can specify the default route using the GATEWAY directive, either globally or in interface-specific configuration files. However, in Red Hat Enterprise Linux the use of the global /etc/sysconfig/network file is deprecated, and specifying the gateway should now only be done in per-interface configuration files. In dynamic network environments, where mobile hosts are managed by NetworkManager , gateway information is likely to be interface specific and is best left to be assigned by DHCP . In special cases where it is necessary to influence NetworkManager 's selection of the exit interface to be used to reach a gateway, make use of the DEFROUTE=no command in the ifcfg files for those interfaces which do not lead to the default gateway. | null | https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/7/html/networking_guide/sec-configuring_the_default_gateway |
Providing feedback on Red Hat documentation | Providing feedback on Red Hat documentation If you have a suggestion to improve this documentation, or find an error, you can contact technical support at https://access.redhat.com to open a request. | null | https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.5/html/using_content_navigator/providing-feedback |
Chapter 8. Applying patches with kernel live patching | Chapter 8. Applying patches with kernel live patching You can use the Red Hat Enterprise Linux kernel live patching solution to patch a running kernel without rebooting or restarting any processes. With this solution, system administrators: Can immediately apply critical security patches to the kernel. Do not have to wait for long-running tasks to complete, for users to log off, or for scheduled downtime. Control the system's uptime more and do not sacrifice security or stability. By using the kernel live patching, you can reduce the number of reboots required for security patches. However, note that you cannot address all critical or important CVEs. For more details about the scope of live patching, see the Red Hat Knowledgebase solution Is live kernel patch (kpatch) supported in Red Hat Enterprise Linux? . Warning Some incompatibilities exist between kernel live patching and other kernel subcomponents. Read the Limitations of kpatch carefully before using kernel live patching. Note For details about the support cadence of kernel live patching updates, see: Kernel Live Patch Support Cadence Update Kernel Live Patch life cycles 8.1. Limitations of kpatch By using the kpatch feature, you can apply simple security and bug fix updates that do not require an immediate system reboot. You must not use the SystemTap or kprobe tool during or after loading a patch. The patch might not take effect until the probes are removed. 8.2. Support for third-party live patching The kpatch utility is the only kernel live patching utility supported by Red Hat with the RPM modules provided by Red Hat repositories. Red Hat does not support live patches provided by a third party. For more information about third-party software support policies, see As a customer how does Red Hat support me when I use third party components? 8.3. Access to kernel live patches A kernel module ( kmod ) implements kernel live patching capability and is provided as an RPM package. All customers have access to kernel live patches, which are delivered through the usual channels. However, customers who do not subscribe to an extended support offering will lose access to new patches for the current minor release once the minor release becomes available. For example, customers with standard subscriptions will only be able to live patch RHEL 9.1 kernel until the RHEL 9.2 kernel is released. The components of kernel live patching are as follows: Kernel patch module The delivery mechanism for kernel live patches. A kernel module built specifically for the kernel being patched. The patch module contains the code of the required fixes for the kernel. Patch modules register with the livepatch kernel subsystem and specify the original functions to replace, along with pointers to the replacement functions. Kernel patch modules are delivered as RPMs. The naming convention is kpatch_<kernel version>_<kpatch version>_<kpatch release> . The "kernel version" part of the name has dots replaced with underscores . The kpatch utility A command-line utility for managing patch modules. The kpatch service A systemd service required by multiuser.target . This target loads the kernel patch module at boot time. The kpatch-dnf package A DNF plugin delivered in the form of an RPM package. This plugin manages automatic subscription to kernel live patches. 8.4. The process of live patching kernels The kpatch kernel patching solution uses the livepatch kernel subsystem to redirect outdated functions to updated ones. Applying a live kernel patch to a system triggers the following processes: The kernel patch module is copied to the /var/lib/kpatch/ directory and registered for re-application to the kernel by systemd on boot. The kpatch module loads into the running kernel and the new functions are registered to the ftrace mechanism with a pointer to the location in memory of the new code. When the kernel accesses the patched function, the ftrace mechanism redirects it, bypassing the original functions and leading the kernel to the patched version of the function. Figure 8.1. How kernel live patching works 8.5. Subscribing the currently installed kernels to the live patching stream A kernel patch module is delivered in an RPM package, specific to the version of the kernel being patched. Each RPM package will be cumulatively updated over time. The following procedure explains how to subscribe to all future cumulative live patching updates for a given kernel. Because live patches are cumulative, you cannot select which individual patches are deployed for a given kernel. Warning Red Hat does not support any third party live patches applied to a Red Hat supported system. Prerequisites You have root permissions. Procedure Optional: Check your kernel version: Search for a live patching package that corresponds to the version of your kernel: Install the live patching package: The command above installs and applies the latest cumulative live patches for that specific kernel only. If the version of a live patching package is 1-1 or higher, the package will contain a patch module. In that case the kernel will be automatically patched during the installation of the live patching package. The kernel patch module is also installed into the /var/lib/kpatch/ directory to be loaded by the systemd system and service manager during the future reboots. Note An empty live patching package will be installed when there are no live patches available for a given kernel. An empty live patching package will have a kpatch_version-kpatch_release of 0-0, for example kpatch-patch-5_14_0-1-0-0.x86_64.rpm . The installation of the empty RPM subscribes the system to all future live patches for the given kernel. Verification Verify that all installed kernels have been patched: The output shows that the kernel patch module has been loaded into the kernel that is now patched with the latest fixes from the kpatch-patch-5_14_0-1-0-1.el9.x86_64.rpm package. Note Entering the kpatch list command does not return an empty live patching package. Use the rpm -qa | grep kpatch command instead. Additional resources kpatch(1) manual page Installing RHEL 9 content 8.6. Automatically subscribing any future kernel to the live patching stream You can use the kpatch-dnf DNF plugin to subscribe your system to fixes delivered by the kernel patch module, also known as kernel live patches. The plugin enables automatic subscription for any kernel the system currently uses, and also for kernels to-be-installed in the future . Prerequisites You have root permissions. Procedure Optional: Check all installed kernels and the kernel you are currently running: Install the kpatch-dnf plugin: Enable automatic subscription to kernel live patches: This command subscribes all currently installed kernels to receiving kernel live patches. The command also installs and applies the latest cumulative live patches, if any, for all installed kernels. When you update the kernel, live patches are installed automatically during the new kernel installation process. The kernel patch module is also installed into the /var/lib/kpatch/ directory to be loaded by the systemd system and service manager during future reboots. Note An empty live patching package will be installed when there are no live patches available for a given kernel. An empty live patching package will have a kpatch_version-kpatch_release of 0-0, for example kpatch-patch-5_14_0-1-0-0.el9.x86_64.rpm . The installation of the empty RPM subscribes the system to all future live patches for the given kernel. Verification Verify that all installed kernels are patched: The output shows that both the kernel you are running, and the other installed kernel have been patched with fixes from kpatch-patch-5_14_0-1-0-1.el9.x86_64.rpm and kpatch-patch-5_14_0-2-0-1.el9.x86_64.rpm packages respectively. Note Entering the kpatch list command does not return an empty live patching package. Use the rpm -qa | grep kpatch command instead. Additional resources kpatch(1) and dnf-kpatch(8) manual pages 8.7. Disabling automatic subscription to the live patching stream When you subscribe your system to fixes delivered by the kernel patch module, your subscription is automatic . You can disable this feature, to disable automatic installation of kpatch-patch packages. Prerequisites You have root permissions. Procedure Optional: Check all installed kernels and the kernel you are currently running: Disable automatic subscription to kernel live patches: Verification You can check for the successful outcome: Additional resources kpatch(1) and dnf-kpatch(8) manual pages 8.8. Updating kernel patch modules The kernel patch modules are delivered and applied through RPM packages. The process of updating a cumulative kernel patch module is similar to updating any other RPM package. Prerequisites The system is subscribed to the live patching stream, as described in Subscribing the currently installed kernels to the live patching stream . Procedure Update to a new cumulative version for the current kernel: The command above automatically installs and applies any updates that are available for the currently running kernel. Including any future released cumulative live patches. Alternatively, update all installed kernel patch modules: Note When the system reboots into the same kernel, the kernel is automatically live patched again by the kpatch.service systemd service. Additional resources Updating software packages in RHEL 8.9. Removing the live patching package Disable the Red Hat Enterprise Linux kernel live patching solution by removing the live patching package. Prerequisites Root permissions The live patching package is installed. Procedure Select the live patching package. The example output lists live patching packages that you installed. Remove the live patching package. When a live patching package is removed, the kernel remains patched until the reboot, but the kernel patch module is removed from disk. On future reboot, the corresponding kernel will no longer be patched. Reboot your system. Verify the live patching package is removed: The command displays no output if the package has been successfully removed. Verification Verify the kernel live patching solution is disabled: The example output shows that the kernel is not patched and the live patching solution is not active because there are no patch modules that are currently loaded. Important Currently, Red Hat does not support reverting live patches without rebooting your system. In case of any issues, contact our support team. Additional resources The kpatch(1) manual page Removing installed packages in RHEL 8.10. Uninstalling the kernel patch module Prevent the Red Hat Enterprise Linux kernel live patching solution from applying a kernel patch module on subsequent boots. Prerequisites Root permissions A live patching package is installed. A kernel patch module is installed and loaded. Procedure Select a kernel patch module: Uninstall the selected kernel patch module. Note that the uninstalled kernel patch module is still loaded: When the selected module is uninstalled, the kernel remains patched until the reboot, but the kernel patch module is removed from disk. Reboot your system. Verification Verify that the kernel patch module is uninstalled: This example output shows no loaded or installed kernel patch modules, therefore the kernel is not patched and the kernel live patching solution is not active. Additional resources The kpatch(1) manual page 8.11. Disabling kpatch.service Prevent the Red Hat Enterprise Linux kernel live patching solution from applying all kernel patch modules globally on subsequent boots. Prerequisites Root permissions A live patching package is installed. A kernel patch module is installed and loaded. Procedure Verify kpatch.service is enabled. Disable kpatch.service : Note that the applied kernel patch module is still loaded: Reboot your system. Optional: Verify the status of kpatch.service . The example output testifies that kpatch.service is disabled. Thereby, the kernel live patching solution is not active. Verify that the kernel patch module has been unloaded. The example output above shows that a kernel patch module is still installed but the kernel is not patched. Important Currently, Red Hat does not support reverting live patches without rebooting your system. In case of any issues, contact our support team. Additional resources The kpatch(1) manual page. Managing systemd | [
"uname -r 5.14.0-1.el9.x86_64",
"dnf search USD(uname -r)",
"dnf install \"kpatch-patch = USD(uname -r)\"",
"kpatch list Loaded patch modules: kpatch_5_14_0_1_0_1 [enabled] Installed patch modules: kpatch_5_14_0_1_0_1 (5.14.0-1.el9.x86_64) ...",
"rpm -qa | grep kpatch kpatch-dnf-0.4-3.el9.noarch kpatch-0.9.7-2.el9.noarch kpatch-patch-5_14_0-284_25_1-0-0.el9_2.x86_64",
"dnf list installed | grep kernel Updating Subscription Management repositories. Installed Packages kernel-core.x86_64 5.14.0-1.el9 @beaker-BaseOS kernel-core.x86_64 5.14.0-2.el9 @@commandline uname -r 5.14.0-2.el9.x86_64",
"dnf install kpatch-dnf",
"dnf kpatch auto Updating Subscription Management repositories. Last metadata expiration check: 1:38:21 ago on Fri 17 Sep 2021 07:29:53 AM EDT. Dependencies resolved. ================================================== Package Architecture ================================================== Installing: kpatch-patch-5_14_0-1 x86_64 kpatch-patch-5_14_0-2 x86_64 Transaction Summary =================================================== Install 2 Packages ...",
"kpatch list Loaded patch modules: kpatch_5_14_0_2_0_1 [enabled] Installed patch modules: kpatch_5_14_0_1_0_1 (5.14.0-1.el9.x86_64) kpatch_5_14_0_2_0_1 (5.14.0-2.el9.x86_64)",
"rpm -qa | grep kpatch kpatch-dnf-0.4-3.el9.noarch kpatch-0.9.7-2.el9.noarch kpatch-patch-5_14_0-284_25_1-0-0.el9_2.x86_64",
"dnf list installed | grep kernel Updating Subscription Management repositories. Installed Packages kernel-core.x86_64 5.14.0-1.el9 @beaker-BaseOS kernel-core.x86_64 5.14.0-2.el9 @@commandline uname -r 5.14.0-2.el9.x86_64",
"dnf kpatch manual Updating Subscription Management repositories.",
"yum kpatch status Updating Subscription Management repositories. Last metadata expiration check: 0:30:41 ago on Tue Jun 14 15:59:26 2022. Kpatch update setting: manual",
"dnf update \"kpatch-patch = USD(uname -r)\"",
"dnf update \"kpatch-patch \"",
"dnf list installed | grep kpatch-patch kpatch-patch-5_14_0-1.x86_64 0-1.el9 @@commandline ...",
"dnf remove kpatch-patch-5_14_0-1.x86_64",
"dnf list installed | grep kpatch-patch",
"kpatch list Loaded patch modules:",
"kpatch list Loaded patch modules: kpatch_5_14_0_1_0_1 [enabled] Installed patch modules: kpatch_5_14_0_1_0_1 (5.14.0-1.el9.x86_64) ...",
"kpatch uninstall kpatch_5_14_0_1_0_1 uninstalling kpatch_5_14_0_1_0_1 (5.14.0-1.el9.x86_64)",
"kpatch list Loaded patch modules: kpatch_5_14_0_1_0_1 [enabled] Installed patch modules: < NO_RESULT >",
"kpatch list Loaded patch modules: ...",
"systemctl is-enabled kpatch.service enabled",
"systemctl disable kpatch.service Removed /etc/systemd/system/multi-user.target.wants/kpatch.service.",
"kpatch list Loaded patch modules: kpatch_5_14_0_1_0_1 [enabled] Installed patch modules: kpatch_5_14_0_1_0_1 (5.14.0-1.el9.x86_64)",
"systemctl status kpatch.service ● kpatch.service - \"Apply kpatch kernel patches\" Loaded: loaded (/usr/lib/systemd/system/kpatch.service; disabled; vendor preset: disabled) Active: inactive (dead)",
"kpatch list Loaded patch modules: Installed patch modules: kpatch_5_14_0_1_0_1 (5.14.0-1.el9.x86_64)"
] | https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/9/html/managing_monitoring_and_updating_the_kernel/applying-patches-with-kernel-live-patching_managing-monitoring-and-updating-the-kernel |
Chapter 50. General Updates | Chapter 50. General Updates The TAB key does not expand USDPWD by default When working in CLI in Red Hat Enterprise Linux 6, pressing the TAB key expanded USDPWD/ to the current directory. In Red Hat Enterprise Linux 7, CLI does not have the same behavior. Users can achieve this behavior by putting the following lines into the USDHOME/.bash_profile file: (BZ#1185416) gnome-getting-started-docs-* moved to the Optional channel As of Red Hat Enterprise Linux 7.3, the gnome-getting-started-docs-* packages have been moved from the Base channel to the Optional channel. Consequently, upgrading from an earlier version of Red Hat Enterprise Linux 7 fails, if these packages were previously installed. To work around this problem, uninstall gnome-getting-started-docs-* prior to upgrading to Red Hat Enterprise Linux 7.3. (BZ#1350802) The remote-viewer SPICE client fails to detect newly plugged-in smart card readers The libcacard library in Red Hat Enterprise Linux 7.3 fails to handle USB hot plug events. As a consequence, while the remote-viewer SPICE client is running, the application in some cases fails to detect a USB smart card reader when it is plugged in. To work around the problem, remove the smart card from the reader and reinsert it. (BZ# 1249116 ) | [
"if ((BASH_VERSINFO[0] >= 4)) && ((BASH_VERSINFO[1] >= 2)); then shopt -s direxpand fi"
] | https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/7/html/7.3_release_notes/known_issues_general_updates |
function::ipmib_get_proto | function::ipmib_get_proto Name function::ipmib_get_proto - Get the protocol value Synopsis Arguments skb pointer to a struct sk_buff Description Returns the protocol value from skb . | [
"ipmib_get_proto:long(skb:long)"
] | https://docs.redhat.com/en/documentation/Red_Hat_Enterprise_Linux/7/html/systemtap_tapset_reference/api-ipmib-get-proto |
13.2. Preparing for a Driver Update During Installation | 13.2. Preparing for a Driver Update During Installation If a driver update is necessary and available for your hardware, Red Hat or a trusted third party such as the hardware vendor will typically provide it in the form of an image file in ISO format. Some methods of performing a driver update require you to make the image file available to the installation program, while others require you to use the image file to make a driver update disk: Methods that use the image file itself local hard drive USB flash drive Methods that use a driver update disk produced from an image file CD DVD Choose a method to provide the driver update, and refer to Section 13.2.1, "Preparing to Use a Driver Update Image File" , Section 13.2.2, "Preparing a Driver Disc" , or Section 13.2.3, "Preparing an Initial RAM Disk Update" . Note that you can use a USB storage device either to provide an image file, or as a driver update disk. 13.2.1. Preparing to Use a Driver Update Image File 13.2.1.1. Preparing to use an image file on local storage To make the ISO image file available on local storage, such as a hard drive or USB flash drive, you must first determine whether you want to install the updates automatically or select them manually. For manual installations, copy the file onto the storage device. You can rename the file if you find it helpful to do so, but you must not change the filename extension, which must remain .iso . In the following example, the file is named dd.iso : Figure 13.1. Content of a USB flash drive holding a driver update image file Note that if you use this method, the storage device will contain only a single file. This differs from driver discs on formats such as CD and DVD, which contain many files. The ISO image file contains all of the files that would normally be on a driver disc. Refer to Section 13.3.2, "Let the Installer Prompt You for a Driver Update" and Section 13.3.3, "Use a Boot Option to Specify a Driver Update Disk" to learn how to select the driver update manually during installation. For automatic installations, you will need to extract the ISO to the root directory of the storage device rather than simply copy it. Copying the ISO is only effective for manual installations. You must also change the file system label of the device to OEMDRV . The installation program will then automatically examine it for driver updates and load any that it detects. This behavior is controlled by the dlabel=on boot option, which is enabled by default. Refer to Section 6.3.1, "Let the Installer Find a Driver Update Disk Automatically" . | null | https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/installation_guide/sect-preparing_for_a_driver_update_during_installation-ppc |
Chapter 5. Configuring Satellite for performance | Chapter 5. Configuring Satellite for performance Satellite comes with a number of components that communicate with each other. You can tune these components independently of each other to achieve the maximum possible performance for your scenario. 5.1. Applying configurations In following sections we suggest various tunables and how to apply them. Please always test changing these in non production environment first, with valid backup and with proper outage window as in most of the cases Satellite restart is required. It is also a good practice to setup a monitoring before applying any change as it will allow you to evaluate effect of the change. Our testing environment might be too far from what you will see although we are trying hard to mimic real world environment. Changing systemd service files If you have changed some systemd service file, you need to notify systemd daemon to reload the configuration: Restart Satellite services: Changing configuration files If you have changed a configuration file such as /etc/foreman-installer/custom-hiera.yaml , rerun installer to apply your changes: Running installer with additional options If you need to rerun installer with some new options added: Checking basic sanity of the setup Optional: After any change, run this quick Satellite health-check: 5.2. Puma tunings Puma is a ruby application server which is used for serving the Foreman related requests to the clients. For any Satellite configuration that is supposed to handle a large number of clients or frequent operations, it is important for the Puma to be tuned appropriately. 5.2.1. Puma threads Number of Puma threads (per Puma worker) is configured by using two values: threads_min and threads_max . Value of threads_min determines how many threads each worker spawns at a worker start. Then, as concurrent requests are coming and more threads is needed, worker will be spawning more and more workers up to threads_max limit. We recommend setting threads_min to same value as threads_max as having fewer Puma threads lead to higher memory usage on your Satellite Server. For example, we have compared these two setups by using concurrent registrations test: Satellite VM with 8 CPUs, 40 GiB RAM Satellite VM with 8 CPUs, 40 GiB RAM --foreman-foreman-service-puma-threads-min=0 --foreman-foreman-service-puma-threads-min=16 --foreman-foreman-service-puma-threads-max=16 --foreman-foreman-service-puma-threads-max=16 --foreman-foreman-service-puma-workers=2 --foreman-foreman-service-puma-workers=2 Setting the minimum Puma threads to 16 results in about 12% less memory usage as compared to threads_min=0 . 5.2.2. Puma workers and threads auto-tuning If you do not provide any Puma workers and thread values with satellite-installer or they are not present in your Satellite configuration, the satellite-installer configures a balanced number of workers. It follows this formula: This should be fine for most cases, but with some usage patterns tuning is needed to either limit the amount of resources dedicated to Puma (so other Satellite components can use these) or for any other reason. Each Puma worker consumes around 1 GiB of RAM. View your current Satellite Server settings View the currently active Puma workers 5.2.3. Manually tuning Puma workers and threads count If you decide not to rely on Section 5.2.2, "Puma workers and threads auto-tuning" , you can apply custom numbers for these tunables. In the example below we are using 2 workers, 5 and 5 threads: Apply your changes to Satellite Server. For more information, see Section 5.1, "Applying configurations" . 5.2.4. Puma workers and threads recommendations In order to recommend thread and worker configurations for the different tuning profiles, we conducted Puma tuning testing on Satellite with different tuning profiles. The main test used in this testing was concurrent registration with the following combinations along with different number of workers and threads. Our recommendation is based purely on concurrent registration performance, so it might not reflect your exact use-case. For example, if your setup is very content oriented with lots of publishes and promotes, you might want to limit resources consumed by Puma in favor of Pulp and PostgreSQL. Name Number of hosts RAM Cores Recommended Puma Threads for both min & max Recommended Puma Workers default 0 - 5000 20 GiB 4 16 4 - 6 medium 5000 - 10000 32 GiB 8 16 8 - 12 large 10000 - 20000 64 GiB 16 16 12 - 18 extra-large 20000 - 60000 128 GiB 32 16 16 - 24 extra-extra-large 60000+ 256 GiB+ 48+ 16 20 - 26 Tuning number of workers is the more important aspect here and in some case we have seen up to 52% performance increase. Although installer uses 5 min/max threads by default, we recommend 16 threads with all the tuning profiles in the table above. That is because we have seen up to 23% performance increase with 16 threads (14% for 8 and 10% for 32) when compared to setup with 4 threads. To figure out these recommendations we used concurrent registrations test case which is a very specific use-case. It can be different on your Satellite which might have more balanced use-case (not only registrations). Keeping default 5 min/max threads is a good choice as well. These are some of our measurements that lead us to these recommendations: 4 workers, 4 threads 4 workers, 8 threads 4 workers, 16 threads 4 workers, 32 threads Improvement 0% 14% 23% 10% Use 4 - 6 workers on a default setup (4 CPUs) - we have seen about 25% higher performance with 5 workers when compared to 2 workers, but 8% lower performance with 8 workers when compared to 2 workers - see table below: 2 workers, 16 threads 4 workers, 16 threads 6 workers, 16 threads 8 workers, 16 threads Improvement 0% 26% 22% -8% Use 8 - 12 workers on a medium setup (8 CPUs) - see table below: 2 workers, 16 threads 4 workers, 16 threads 8 workers, 16 threads 12 workers, 16 threads 16 workers, 16 threads Improvement 0% 51% 52% 52% 42% Use 16 - 24 workers on a 32 CPUs setup (this was tested on a 90 GiB RAM machine and memory turned out to be a factor here as system started swapping - proper extra-large should have 128 GiB), higher number of workers was problematic for higher registration concurrency levels we tested, so we cannot recommend it. 4 workers, 16 threads 8 workers, 16 threads 16 workers, 16 threads 24 workers, 16 threads 32 workers, 16 threads 48 workers, 16 threads Improvement 0% 37% 44% 52% too many failures too many failures 5.2.5. Configuring Puma workers If you have enough CPUs, adding more workers adds more performance. For example, we have compared Satellite setups with 8 and 16 CPUs: Table 5.1. satellite-installer options used to test effect of workers count Satellite VM with 8 CPUs, 40 GiB RAM Satellite VM with 16 CPUs, 40 GiB RAM --foreman-foreman-service-puma-threads-min=16 --foreman-foreman-service-puma-threads-min=16 --foreman-foreman-service-puma-threads-max=16 --foreman-foreman-service-puma-threads-max=16 --foreman-foreman-service-puma-workers={2|4|8|16} --foreman-foreman-service-puma-workers={2|4|8|16} In 8 CPUs setup, changing the number of workers from 2 to 16, improved concurrent registration time by 36%. In 16 CPUs setup, the same change caused 55% improvement. Adding more workers can also help with total registration concurrency Satellite can handle. In our measurements, setup with 2 workers were able to handle up to 480 concurrent registrations, but adding more workers improved the situation. 5.2.6. Configuring Puma threads More threads allow for lower time to register hosts in parallel. For example, we have compared these two setups: Satellite VM with 8 CPUs, 40 GiB RAM Satellite VM with 8 CPUs, 40 GiB RAM --foreman-foreman-service-puma-threads-min=16 --foreman-foreman-service-puma-threads-min=8 --foreman-foreman-service-puma-threads-max=16 --foreman-foreman-service-puma-threads-max=8 --foreman-foreman-service-puma-workers=2 --foreman-foreman-service-puma-workers=4 Using more workers and the same total number of threads results in about 11% of speedup in highly concurrent registrations scenario. Moreover, adding more workers did not consume more CPU and RAM but gets more performance. 5.2.7. Configuring Puma DB pool The effective value of USDdb_pool is automatically set to equal USDforeman::foreman_service_puma_threads_max . It is the maximum of USDforeman::db_pool and USDforeman::foreman_service_puma_threads_max but both have default value 5, so any increase to the max threads above 5 automatically increases the database connection pool by the same amount. If you encounter ActiveRecord::ConnectionTimeoutError: could not obtain a connection from the pool within 5.000 seconds (waited 5.006 seconds); all pooled connections were in use error in /var/log/foreman/production.log , you might want to increase this value. View current db_pool setting 5.2.8. Manually tuning db_pool If you decide not to rely on automatically configured value, you can apply custom number like this: Apply your changes to Satellite Server. For more information, see Section 5.1, "Applying configurations" . 5.3. Apache HTTPD performance tuning Apache httpd forms a core part of the Satellite and acts as a web server for handling the requests that are being made through the Satellite web UI or exposed APIs. To increase the concurrency of the operations, httpd forms the first point where tuning can help to boost the performance of your Satellite. 5.3.1. Configuring the open files limit for Apache HTTPD With the tuning in place, Apache httpd can easily open a lot of file descriptors on the server which may exceed the default limit of most of the Linux systems in place. To avoid any kind of issues that may arise as a result of exceeding max open files limit on the system, please create the following file and directory and set the contents of the file as specified in the below given example: Procedure Set the maximum open files limit in /etc/systemd/system/httpd.service.d/limits.conf : Apply your changes to Satellite Server. For more information, see Section 5.1, "Applying configurations" . 5.3.2. Tuning Apache httpd child processes By default, httpd uses event request handling mechanism. When the number of requests to httpd exceeds the maximum number of child processes that can be launched to handle the incoming connections, httpd raises an HTTP 503 Service Unavailable error. Amidst httpd running out of processes to handle, the incoming connections can also result in multiple component failures on your Satellite services side due to the dependency of some components on the availability of httpd processes. You can adapt the configuration of httpd event to handle more concurrent requests based on your expected peak load. Warning Configuring these numbers in custom-hiera.yaml locks them. If you change these numbers using satellite-installer --tuning= My_Tuning_Option , your custom-hiera.yaml will overwrite this setting. Set your numbers only if you have specific need for it. Procedure Modify the number of concurrent requests in /etc/foreman-installer/custom-hiera.yaml by changing or adding the following lines: The example is identical to running satellite-installer --tuning=medium or higher on Satellite Server. Apply your changes to Satellite Server. For more information, see Section 5.1, "Applying configurations" . 5.4. Dynflow tuning Dynflow is the workflow management system and task orchestrator which is a Satellite plugin and is used to execute the different tasks of Satellite in an out-of-order execution manner. Under the conditions when there are a lot of clients checking in on Satellite and running a number of tasks, Dynflow can take some help from an added tuning specifying how many executors can it launch. For more information about the tunings involved related to Dynflow, see https://satellite.example.com/foreman_tasks/sidekiq . Increase number of Sidekiq workers Satellite contains a Dynflow service called dynflow-sidekiq that performs tasks scheduled by Dynflow. Sidekiq workers can be grouped into various queues to ensure lots of tasks of one type will not block execution of tasks of other type. Red Hat recommends to increase the number of sidekiq workers to scale the Foreman tasking system for bulk concurrent tasks, for example for multiple content view publications and promotions, content synchronizations, and synchronizations to Capsule Servers. There are two options available: You can increase the number of threads used by a worker (worker's concurrency). This has limited impact for values larger than five due to Ruby implementation of the concurrency of threads. You can increase the number of workers, which is recommended. Procedure Increase the number of workers from one worker to three while remaining five threads/concurrency of each: Optional: Check if there are three worker services: For more information, see How to add sidekiq workers in Satellite6? . 5.5. Pull-based REX transport tuning Satellite has a pull-based transport mode for remote execution. This transport mode uses MQTT as its messaging protocol and includes an MQTT client running on each host. For more information, see Transport Modes for Remote Execution in Managing hosts . 5.5.1. Increasing host limit for pull-based REX transport You can tune the mosquitto MQTT server and increase the number of hosts connected to it. Procedure Enable pull-based remote execution on your Satellite Server or Capsule Server: Note that your Satellite Server or Capsule Server can only use one transport mode, either SSH or MQTT. Create a config file to increase the default number of hosts accepted by the MQTT service: This example sets the limit to allow the mosquitto service to handle 5000 hosts. Run the following commands to apply your changes: 5.5.2. Decreasing performance impact of the pull-based REX transport When Satellite Server is configured with the pull-based transport mode for remote execution jobs using the Script provider, Capsule Server sends notifications about new jobs to clients through MQTT. This notification does not include the actual workload that the client is supposed to execute. After a client receives a notification about a new remote execution job, it queries Capsule Server for its actual workload. During the job, the client periodically sends outputs of the job to Capsule Server, further increasing the number of requests to Capsule Server. These requests to Capsule Server together with high concurrency allowed by the MQTT protocol can cause exhaustion of available connections on Capsule Server. Some requests might fail, making some child tasks of remote execution jobs unresponsive. This also depends on actual job workload, as some jobs are causing additional load to Satellite Server, making it compete for resources if clients are registered to Satellite Server. To avoid this, configure your Satellite Server and Capsule Server with the following parameters: MQTT Time To Live - Time interval in seconds given to the host to pick up the job before considering the job undelivered MQTT Resend Interval - Time interval in seconds at which the notification should be re-sent to the host until the job is picked up or cancelled MQTT Rate Limit - Number of jobs that are allowed to run at the same time. You can limit the concurrency of remote execution by tuning the rate limit which means you are going to put more load on Satellite. Procedure Tune the MQTT parameters on your Satellite Server: Capsule Server logs are in /var/log/foreman-proxy/proxy.log . Capsule Server uses Webrick HTTP server (no httpd or Puma involved), so there is no simple way to increase its capacity. Note Depending on the workload, number of hosts, available resources, and applied tuning, you might hit the Bug 2244811 , which causes Capsule to consume too much memory and eventually be killed, making the rest of the job fail. At the moment there is no universally applicable workaround. 5.6. PostgreSQL tuning PostgreSQL is the primary SQL based database that is used by Satellite for the storage of persistent context across a wide variety of tasks that Satellite does. The database sees an extensive usage is usually working on to provide the Satellite with the data which it needs for its smooth functioning. This makes PostgreSQL a heavily used process which if tuned can have a number of benefits on the overall operational response of Satellite. The PostgreSQL authors recommend disabling Transparent Hugepage on servers running PostgreSQL. For more information, see Section 4.3, "Disable Transparent Hugepage" . You can apply a set of tunings to PostgreSQL to improve its response times, which will modify the postgresql.conf file. Procedure Append /etc/foreman-installer/custom-hiera.yaml to tune PostgreSQL: postgresql::server::config_entries: max_connections: 1000 shared_buffers: 2GB work_mem: 8MB autovacuum_vacuum_cost_limit: 2000 You can use this to effectively tune down your Satellite instance irrespective of a tuning profile. Apply your changes to Satellite Server. For more information, see Section 5.1, "Applying configurations" . In the above tuning configuration, there are a certain set of keys which we have altered: max_connections : The key defines the maximum number of connections that can be accepted by the PostgreSQL processes that are running. shared_buffers : The shared buffers define the memory used by all the active connections inside PostgreSQL to store the data for the different database operations. An optimal value for this will vary between 2 GiB to a maximum of 25% of your total system memory depending upon the frequency of the operations being conducted on Satellite. work_mem : The work_mem is the memory that is allocated on per process basis for PostgreSQL and is used to store the intermediate results of the operations that are being performed by the process. Setting this value to 8 MB should be more than enough for most of the intensive operations on Satellite. autovacuum_vacuum_cost_limit : The key defines the cost limit value for the vacuuming operation inside the autovacuum process to clean up the dead tuples inside the database relations. The cost limit defines the number of tuples that can be processed in a single run by the process. Red Hat recommends setting the value to 2000 as it is for the medium , large , extra-large , and extra-extra-large profiles, based on the general load that Satellite pushes on the PostgreSQL server process. For more information, see BZ1867311: Upgrade fails when checkpoint_segments postgres parameter configured . 5.6.1. Benchmarking raw DB performance To get a list of the top table sizes in disk space for both Candlepin, Foreman, and Pulp check postgres-size-report script in satellite-support git repository. PGbench utility (note you may need to resize PostgreSQL data directory /var/lib/pgsql directory to 100 GiB or what does benchmark take to run) might be used to measure PostgreSQL performance on your system. Use dnf install postgresql-contrib to install it. For more information, see github.com/RedHatSatellite/satellite-support . Choice of filesystem for PostgreSQL data directory might matter as well. Warning Never do any testing on production system and without valid backup. Before you start testing, see how big the database files are. Testing with a really small database would not produce any meaningful results. For example, if the DB is only 20 GiB and the buffer pool is 32 GiB, it won't show problems with large number of connections because the data will be completely buffered. 5.7. Redis tuning Redis is an in-memory data store. It is used by multiple services in Satellite. The Dynflow and Pulp tasking systems use it to track their tasks. Given the way Satellite uses Redis, its memory consumption should be stable. The Redis authors recommend disabling Transparent Hugepage on servers running Redis. For more about it please see Section 4.3, "Disable Transparent Hugepage" . 5.8. Capsule configuration tuning Capsules are meant to offload part of Satellite load and provide access to different networks related to distributing content to clients but they can also be used to execute remote execution jobs. What they cannot help with is anything which extensively uses Satellite API as host registration or package profile update. 5.8.1. Capsule performance tests We have measured multiple test cases on multiple Capsule configurations: Capsule HW configuration CPUs RAM minimal 4 12 GiB large 8 24 GiB extra large 16 46 GiB Content delivery use case In a download test where we concurrently downloaded a 40MB repo of 2000 packages on 100, 200, .. 1000 hosts, we saw roughly 50% improvement in average download duration every time when we doubled Capsule Server resources. For more precise numbers, see the table below. Concurrent downloading hosts Minimal (4 CPU and 12 GiB RAM) Large (8 CPU and 24 GiB RAM) Large (8 CPU and 24 GiB RAM) Extra Large (16 CPU and 46 GiB RAM) Minimal (4 CPU and 12 GiB RAM) Extra Large (16 CPU and 46 GiB RAM) Average Improvement ~ 50% (e.g. for 700 concurrent downloads in average 9 seconds vs. 4.4 seconds per package) ~ 40% (e.g. for 700 concurrent downloads in average 4.4 seconds vs. 2.5 seconds per package) ~ 70% (e.g. for 700 concurrent downloads in average 9 seconds vs. 2.5 seconds per package) When we compared download performance from Satellite Server vs. from Capsule Server, we have seen only about 5% speedup, but that is expected as Capsule Server's main benefit is in getting content closer to geographically distributed clients (or clients in different networks) and in handling part of the load Satellite Server would have to handle itself. In some smaller hardware configurations (8 CPUs and 24 GiB), Satellite Server was not able to handle downloads from more than 500 concurrent clients, while a Capsule Server with the same hardware configuration was able to service more than 1000 and possibly even more. Concurrent registrations use case For concurrent registrations, a bottleneck is usually CPU speed, but all configs were able to handle even high concurrency without swapping. Hardware resources used for Capsule have only minimal impact on registration performance. For example, Capsule Server with 16 CPUs and 46 GiB RAM have at most a 9% registration speed improvement when compared to a Capsule Server with 4 CPUs and 12 GiB RAM. During periods of very high concurrency, you might experience timeouts in the Capsule Server to Satellite Server communication. You can alleviate this by increasing the default timeout by using the following tunable in /etc/foreman-installer/custom-hiera.yaml : Remote execution use case We have tested executing Remote Execution jobs via both SSH and Ansible backend on 500, 2000 and 4000 hosts. All configurations were able to handle all of the tests without errors, except for the smallest configuration (4 CPUs and 12 GiB memory) which failed to finish on all 4000 hosts. Content sync use case In a sync test where we synced Red Hat Enterprise Linux 6, 7, 8 BaseOS and 8 AppStream we have not seen significant differences among Capsule configurations. This will be different for syncing a higher number of content views in parallel. | [
"systemctl daemon-reload",
"satellite-maintain service restart",
"satellite-installer",
"satellite-installer new options",
"satellite-maintain health check",
"min(CPU_COUNT * 1.5, RAM_IN_GB - 1.5)",
"cat /etc/systemd/system/foreman.service.d/installer.conf",
"systemctl status foreman",
"satellite-installer --foreman-foreman-service-puma-workers=2 --foreman-foreman-service-puma-threads-min=5 --foreman-foreman-service-puma-threads-max=5",
"grep pool /etc/foreman/database.yml pool: 5",
"satellite-installer --foreman-db-pool 10",
"[Service] LimitNOFILE=640000",
"apache::mod::event::serverlimit: 64 apache::mod::event::maxrequestworkers: 1024 apache::mod::event::maxrequestsperchild: 4000",
"satellite-installer --foreman-dynflow-worker-instances 3 # optionally, add --foreman-dynflow-worker-concurrency 5",
"systemctl -a | grep dynflow-sidekiq@worker-[0-9] [email protected] loaded active running Foreman jobs daemon - worker-1 on sidekiq [email protected] loaded active running Foreman jobs daemon - worker-2 on sidekiq [email protected] loaded active running Foreman jobs daemon - worker-3 on sidekiq",
"satellite-installer --foreman-proxy-plugin-remote-execution-script-mode pull-mqtt",
"cat >/etc/systemd/system/mosquitto.service.d/limits.conf << EOF [Service] LimitNOFILE=5000 EOF",
"systemctl daemon-reload systemctl restart mosquitto.service",
"satellite-installer --foreman-proxy-plugin-remote-execution-script-mqtt-rate-limit My_MQTT_Rate_Limit --foreman-proxy-plugin-remote-execution-script-mqtt-resend-interval My_MQTT_Resend_Interval --foreman-proxy-plugin-remote-execution-script-mqtt-ttl My_MQTT_Time_To_Live",
"postgresql::server::config_entries: max_connections: 1000 shared_buffers: 2GB work_mem: 8MB autovacuum_vacuum_cost_limit: 2000",
"apache::mod::proxy::proxy_timeout: 600"
] | https://docs.redhat.com/en/documentation/red_hat_satellite/6.16/html/tuning_performance_of_red_hat_satellite/configuring_project_for_performance_performance-tuning |
Making open source more inclusive | Making open source more inclusive Red Hat is committed to replacing problematic language in our code, documentation, and web properties. We are beginning with these four terms: master, slave, blacklist, and whitelist. Because of the enormity of this endeavor, these changes will be implemented gradually over several upcoming releases. For more details, see our CTO Chris Wright's message . | null | https://docs.redhat.com/en/documentation/migration_toolkit_for_runtimes/1.2/html/maven_plugin_guide/making-open-source-more-inclusive |
Chapter 21. Monitoring containers | Chapter 21. Monitoring containers Use Podman commands to manage a Podman environment. With that, you can determine the health of the container, by displaying system and pod information, and monitoring Podman events. 21.1. Using a health check on a container You can use the health check to determine the health or readiness of the process running inside the container. If the health check succeeds, the container is marked as "healthy"; otherwise, it is "unhealthy". You can compare a health check with running the podman exec command and examining the exit code. The zero exit value means that the container is "healthy". Health checks can be set when building an image using the HEALTHCHECK instruction in the Containerfile or when creating the container on the command line. You can display the health-check status of a container using the podman inspect or podman ps commands. A health check consists of six basic components: Command Retries Interval Start-period Timeout Container recovery The description of health check components follows: Command ( --health-cmd option) Podman executes the command inside the target container and waits for the exit code. The other five components are related to the scheduling of the health check and they are optional. Retries ( --health-retries option) Defines the number of consecutive failed health checks that need to occur before the container is marked as "unhealthy". A successful health check resets the retry counter. Interval ( --health-interval option) Describes the time between running the health check command. Note that small intervals cause your system to spend a lot of time running health checks. The large intervals cause struggles with catching time outs. Start-period ( --health-start-period option) Describes the time between when the container starts and when you want to ignore health check failures. Timeout ( --health-timeout option) Describes the period of time the health check must complete before being considered unsuccessful. Note The values of the Retries, Interval, and Start-period components are time durations, for example "30s" or "1h15m". Valid time units are "ns," "us," or "ms", "ms," "s," "m," and "h". Container recovery ( --health-on-failure option) Determines which actions to perform when the status of a container is unhealthy. When the application fails, Podman restarts it automatically to provide robustness. The --health-on-failure option supports four actions: none : Take no action, this is the default action. kill : Kill the container. restart : Restart the container. stop : Stop the container. Note The --health-on-failure option is available in Podman version 4.2 and later. Warning Do not combine the restart action with the --restart option. When running inside of a systemd unit, consider using the kill or stop action instead, to make use of systemd restart policy. Health checks run inside the container. Health checks only make sense if you know what the health state of the service is and can differentiate between a successful and unsuccessful health check. Additional resources podman-healthcheck and podman-run man pages on your system Podman at the edge: Keeping services alive with custom healthcheck actions Monitoring container vitality and availability with Podman 21.2. Performing a health check using the command line You can set a health check when creating the container on the command line. Prerequisites The container-tools module is installed. Procedure Define a health check: The --health-cmd option sets a health check command for the container. The --health-interval=0 option with 0 value indicates that you want to run the health check manually. Check the health status of the hc-container container: Using the podman inspect command: Using the podman ps command: Using the podman healthcheck run command: Additional resources podman-healthcheck and podman-run man pages on your system Podman at the edge: Keeping services alive with custom healthcheck actions Monitoring container vitality and availability with Podman 21.3. Performing a health check using a Containerfile You can set a health check by using the HEALTHCHECK instruction in the Containerfile . Prerequisites The container-tools module is installed. Procedure Create a Containerfile : Note The HEALTHCHECK instruction is supported only for the docker image format. For the oci image format, the instruction is ignored. Build the container and add an image name: Run the container: Check the health status of the hc-container container: Using the podman inspect command: Using the podman ps command: Using the podman healthcheck run command: Additional resources podman-healthcheck and podman-run man pages on your system Podman at the edge: Keeping services alive with custom healthcheck actions Monitoring container vitality and availability with Podman 21.4. Displaying Podman system information The podman system command enables you to manage the Podman systems by displaying system information. Prerequisites The container-tools module is installed. Procedure Display Podman system information: To show Podman disk usage, enter: To show detailed information about space usage, enter: To display information about the host, current storage stats, and build of Podman, enter: To remove all unused containers, images and volume data, enter: The podman system prune command removes all unused containers (both dangling and unreferenced), pods and optionally, volumes from local storage. Use the --all option to delete all unused images. Unused images are dangling images and any image that does not have any containers based on it. Use the --volume option to prune volumes. By default, volumes are not removed to prevent important data from being deleted if there is currently no container using the volume. Additional resources podman-system-df , podman-system-info , and podman-system-prune man pages on your system 21.5. Podman event types You can monitor events that occur in Podman. Several event types exist and each event type reports different statuses. The container event type reports the following statuses: attach checkpoint cleanup commit create exec export import init kill mount pause prune remove restart restore start stop sync unmount unpause The pod event type reports the following statuses: create kill pause remove start stop unpause The image event type reports the following statuses: prune push pull save remove tag untag The system type reports the following statuses: refresh renumber The volume type reports the following statuses: create prune remove Additional resources podman-events man page on your system 21.6. Monitoring Podman events You can monitor and print events that occur in Podman using the podman events command. Each event will include a timestamp, a type, a status, name, if applicable, and image, if applicable. Prerequisites The container-tools module is installed. Procedure Run the myubi container: Display the Podman events: To display all Podman events, enter: The --stream=false option ensures that the podman events command exits when reading the last known event. You can see several events that happened when you enter the podman run command: container create when creating a new container. image pull when pulling an image if the container image is not present in the local storage. container init when initializing the container in the runtime and setting a network. container start when starting the container. container attach when attaching to the terminal of a container. That is because the container runs in the foreground. container died is emitted when the container exits. container remove because the --rm flag was used to remove the container after it exits. You can also use the journalctl command to display Podman events: To show only Podman create events, enter: You can also use the journalctl command to display Podman create events: Additional resources podman-events man page on your system Container Events and Auditing 21.7. Using Podman events for auditing Previously, the events had to be connected to an event to interpret them correctly. For example, the container-create event had to be linked with an image-pull event to know which image had been used. The container-create event also did not include all data, for example, the security settings, volumes, mounts, and so on. Beginning with Podman v4.4, you can gather all relevant information about a container directly from a single event and journald entry. The data is in JSON format, the same as from the podman container inspect command and includes all configuration and security settings of a container. You can configure Podman to attach the container-inspect data for auditing purposes. Prerequisites The container-tools module is installed. Procedure Modify the ~/.config/containers/containers.conf file and add the events_container_create_inspect_data=true option to the [engine] section: For the system-wide configuration, modify the /etc/containers/containers.conf or /usr/share/container/containers.conf file. Create the container: Display the Podman events: Using the podman events command: The --format "{{.ContainerInspectData}}" option displays the inspect data. The jq ".Config.CreateCommand" transforms the JSON data into a more readable format and displays the parameters for the podman create command. Using the journalctl command: The output data for the podman events and journalctl commands are the same. Additional resources podman-events and containers.conf man pages on your system Container Events and Auditing | [
"podman run -dt --name=hc-container -p 8080:8080 --health-cmd='curl http://localhost:8080 || exit 1' --health-interval=0 registry.access.redhat.com/ubi8/httpd-24",
"podman inspect --format='{{json .State.Health.Status}}' hc-container healthy",
"podman ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES a680c6919fe localhost/hc-container:latest /usr/bin/run-http... 2 minutes ago Up 2 minutes (healthy) hc-container",
"podman healthcheck run hc-container healthy",
"cat Containerfile FROM registry.access.redhat.com/ubi8/httpd-24 EXPOSE 8080 HEALTHCHECK CMD curl http://localhost:8080 || exit 1",
"podman build --format=docker -t hc-container . STEP 1/3: FROM registry.access.redhat.com/ubi8/httpd-24 STEP 2/3: EXPOSE 8080 --> 5aea97430fd STEP 3/3: HEALTHCHECK CMD curl http://localhost:8080 || exit 1 COMMIT health-check Successfully tagged localhost/health-check:latest a680c6919fe6bf1a79219a1b3d6216550d5a8f83570c36d0dadfee1bb74b924e",
"podman run -dt --name=hc-container localhost/hc-container",
"podman inspect --format='{{json .State.Health.Status}}' hc-container healthy",
"podman ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES a680c6919fe localhost/hc-container:latest /usr/bin/run-http... 2 minutes ago Up 2 minutes (healthy) hc-container",
"podman healthcheck run hc-container healthy",
"podman system df TYPE TOTAL ACTIVE SIZE RECLAIMABLE Images 3 2 1.085GB 233.4MB (0%) Containers 2 0 28.17kB 28.17kB (100%) Local Volumes 3 0 0B 0B (0%)",
"podman system df -v Images space usage: REPOSITORY TAG IMAGE ID CREATED SIZE SHARED SIZE UNIQUE SIZE CONTAINERS registry.access.redhat.com/ubi8 latest b1e63aaae5cf 13 days 233.4MB 233.4MB 0B 0 registry.access.redhat.com/ubi8/httpd-24 latest 0d04740850e8 13 days 461.5MB 0B 461.5MB 1 registry.redhat.io/rhel8/podman latest dce10f591a2d 13 days 390.6MB 233.4MB 157.2MB 1 Containers space usage: CONTAINER ID IMAGE COMMAND LOCAL VOLUMES SIZE CREATED STATUS NAMES 311180ab99fb 0d04740850e8 /usr/bin/run-httpd 0 28.17kB 16 hours exited hc1 bedb6c287ed6 dce10f591a2d podman run ubi8 echo hello 0 0B 11 hours configured dazzling_tu Local Volumes space usage: VOLUME NAME LINKS SIZE 76de0efa83a3dae1a388b9e9e67161d28187e093955df185ea228ad0b3e435d0 0 0B 8a1b4658aecc9ff38711a2c7f2da6de192c5b1e753bb7e3b25e9bf3bb7da8b13 0 0B d9cab4f6ccbcf2ac3cd750d2efff9d2b0f29411d430a119210dd242e8be20e26 0 0B",
"podman system info host: arch: amd64 buildahVersion: 1.22.3 cgroupControllers: [] cgroupManager: cgroupfs cgroupVersion: v1 conmon: package: conmon-2.0.29-1.module+el8.5.0+12381+e822eb26.x86_64 path: /usr/bin/conmon version: 'conmon version 2.0.29, commit: 7d0fa63455025991c2fc641da85922fde889c91b' cpus: 2 distribution: distribution: '\"rhel\"' version: \"8.5\" eventLogger: file hostname: localhost.localdomain idMappings: gidmap: - container_id: 0 host_id: 1000 size: 1 - container_id: 1 host_id: 100000 size: 65536 uidmap: - container_id: 0 host_id: 1000 size: 1 - container_id: 1 host_id: 100000 size: 65536 kernel: 4.18.0-323.el8.x86_64 linkmode: dynamic memFree: 352288768 memTotal: 2819129344 ociRuntime: name: runc package: runc-1.0.2-1.module+el8.5.0+12381+e822eb26.x86_64 path: /usr/bin/runc version: |- runc version 1.0.2 spec: 1.0.2-dev go: go1.16.7 libseccomp: 2.5.1 os: linux remoteSocket: path: /run/user/1000/podman/podman.sock security: apparmorEnabled: false capabilities: CAP_NET_RAW,CAP_CHOWN,CAP_DAC_OVERRIDE,CAP_FOWNER,CAP_FSETID,CAP_KILL,CAP_NET_BIND_SERVICE,CAP_SETFCAP,CAP_SETGID,CAP_SETPCAP,CAP_SETUID,CAP_SYS_CHROOT rootless: true seccompEnabled: true seccompProfilePath: /usr/share/containers/seccomp.json selinuxEnabled: true serviceIsRemote: false slirp4netns: executable: /usr/bin/slirp4netns package: slirp4netns-1.1.8-1.module+el8.5.0+12381+e822eb26.x86_64 version: |- slirp4netns version 1.1.8 commit: d361001f495417b880f20329121e3aa431a8f90f libslirp: 4.4.0 SLIRP_CONFIG_VERSION_MAX: 3 libseccomp: 2.5.1 swapFree: 3113668608 swapTotal: 3124752384 uptime: 11h 24m 12.52s (Approximately 0.46 days) registries: search: - registry.fedoraproject.org - registry.access.redhat.com - registry.centos.org - docker.io store: configFile: /home/user/.config/containers/storage.conf containerStore: number: 2 paused: 0 running: 0 stopped: 2 graphDriverName: overlay graphOptions: overlay.mount_program: Executable: /usr/bin/fuse-overlayfs Package: fuse-overlayfs-1.7.1-1.module+el8.5.0+12381+e822eb26.x86_64 Version: |- fusermount3 version: 3.2.1 fuse-overlayfs: version 1.7.1 FUSE library version 3.2.1 using FUSE kernel interface version 7.26 graphRoot: /home/user/.local/share/containers/storage graphStatus: Backing Filesystem: xfs Native Overlay Diff: \"false\" Supports d_type: \"true\" Using metacopy: \"false\" imageStore: number: 3 runRoot: /run/user/1000/containers volumePath: /home/user/.local/share/containers/storage/volumes version: APIVersion: 3.3.1 Built: 1630360721 BuiltTime: Mon Aug 30 23:58:41 2021 GitCommit: \"\" GoVersion: go1.16.7 OsArch: linux/amd64 Version: 3.3.1",
"podman system prune WARNING! This will remove: - all stopped containers - all stopped pods - all dangling images - all build cache Are you sure you want to continue? [y/N] y",
"podman run -q --rm --name=myubi registry.access.redhat.com/ubi8/ubi:latest",
"now=USD(date --iso-8601=seconds) podman events --since=now --stream=false 2023-03-08 14:27:20.696167362 +0100 CET container create d4748226a2bcd271b1bc4b9f88b54e8271c13ffea9b30529968291c62d72fe09 (image=registry.access.redhat.com/ubi8/ubi:latest, name=myubi,...) 2023-03-08 14:27:20.652325082 +0100 CET image pull registry.access.redhat.com/ubi8/ubi:latest 2023-03-08 14:27:20.795695396 +0100 CET container init d4748226a2bcd271b1bc4b9f88b54e8271c13ffea9b30529968291c62d72fe09 (image=registry.access.redhat.com/ubi8/ubi:latest, name=myubi...) 2023-03-08 14:27:20.809205161 +0100 CET container start d4748226a2bcd271b1bc4b9f88b54e8271c13ffea9b30529968291c62d72fe09 (image=registry.access.redhat.com/ubi8/ubi:latest, name=myubi...) 2023-03-08 14:27:20.809903022 +0100 CET container attach d4748226a2bcd271b1bc4b9f88b54e8271c13ffea9b30529968291c62d72fe09 (image=registry.access.redhat.com/ubi8/ubi:latest, name=myubi...) 2023-03-08 14:27:20.831710446 +0100 CET container died d4748226a2bcd271b1bc4b9f88b54e8271c13ffea9b30529968291c62d72fe09 (image=registry.access.redhat.com/ubi8/ubi:latest, name=myubi...) 2023-03-08 14:27:20.913786892 +0100 CET container remove d4748226a2bcd271b1bc4b9f88b54e8271c13ffea9b30529968291c62d72fe09 (image=registry.access.redhat.com/ubi8/ubi:latest, name=myubi...)",
"journalctl --user -r SYSLOG_IDENTIFIER=podman Mar 08 14:27:20 fedora podman[129324]: 2023-03-08 14:27:20.913786892 +0100 CET m=+0.066920979 container remove Mar 08 14:27:20 fedora podman[129289]: 2023-03-08 14:27:20.696167362 +0100 CET m=+0.079089208 container create d4748226a2bcd271b1bc4b9f88b54e8271c13ffea9b30529968291c62d72f>",
"podman events --filter event=create 2023-03-08 14:27:20.696167362 +0100 CET container create d4748226a2bcd271b1bc4b9f88b54e8271c13ffea9b30529968291c62d72fe09 (image=registry.access.redhat.com/ubi8/ubi:latest, name=myubi,...)",
"journalctl --user -r PODMAN_EVENT=create Mar 08 14:27:20 fedora podman[129289]: 2023-03-08 14:27:20.696167362 +0100 CET m=+0.079089208 container create d4748226a2bcd271b1bc4b9f88b54e8271c13ffea9b30529968291c62d72f>",
"cat ~/.config/containers/containers.conf [engine] events_container_create_inspect_data=true",
"podman create registry.access.redhat.com/ubi8/ubi:latest 19524fe3c145df32d4f0c9af83e7964e4fb79fc4c397c514192d9d7620a36cd3",
"now=USD(date --iso-8601=seconds) podman events --since USDnow --stream=false --format \"{{.ContainerInspectData}}\" | jq \".Config.CreateCommand\" [ \"/usr/bin/podman\", \"create\", \"registry.access.redhat.com/ubi8\" ]",
"journalctl --user -r PODMAN_EVENT=create --all -o json | jq \".PODMAN_CONTAINER_INSPECT_DATA | fromjson\" | jq \".Config.CreateCommand\" [ \"/usr/bin/podman\", \"create\", \"registry.access.redhat.com/ubi8\" ]"
] | https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/8/html/building_running_and_managing_containers/assembly_monitoring-containers |
Chapter 6. Discovering hosts on a network | Chapter 6. Discovering hosts on a network Red Hat Satellite can detect hosts on a network that are not in your Satellite inventory. These hosts boot the Discovery image that performs hardware detection and relays this information back to Satellite Server. This method creates a list of ready-to-provision hosts in Satellite Server without needing to enter the MAC address of each host. 6.1. Prerequisites for using Discovery Ensure that the DHCP range of all subnets that you plan to use for Discovery does not overlap with the DHCP lease pool configured for the managed DHCP service. The DHCP range is set in the Satellite web UI, whereas the lease pool range is set by using the satellite-installer command. For example, in the 10.1.0.0/16 network range, you can allocate the following IP address blocks: 10.1.0.0 to 10.1.127.255 for leases. 10.1.128.0 to 10.1.255.254 for reservations. Ensure the host or virtual machine being discovered has at least 1200 MB of memory. Insufficient memory can cause various random kernel panic errors because the Discovery image is extracted in memory. 6.2. Installing the Discovery service The Discovery service is enabled by default on Satellite Server. Additionally, you can enable the Discovery service on any Capsule Servers that provide the TFTP service. The Discovery service requires a Discovery image, which is provided with Red Hat Satellite. The Discovery image uses a minimal operating system that is booted on hosts to acquire initial hardware information and check in with Satellite. The Foreman Discovery image provided with Satellite is based on Red Hat Enterprise Linux 8. Procedure Install foreman-discovery-image on Satellite Server: The foreman-discovery-image package installs the Discovery ISO to the /usr/share/foreman-discovery-image/ directory. The package also extracts the PXE boot image to the /var/lib/tftpboot/boot/fdi-image directory. If you want to use Capsule Server, install the Discovery plugin on Capsule Server: If you want to use Capsule Server, install foreman-discovery-image on Capsule Server: The package also extracts the PXE boot image to the /var/lib/tftpboot/boot/fdi-image directory. Configure the Discovery Capsule for the subnet with discoverable hosts: In the Satellite web UI, navigate to Infrastructure > Subnets . Select a subnet. On the Capsules tab, select the Discovery Capsule that you want to use. Perform this for each subnet that you want to use. 6.3. Discovery in PXE mode Satellite provides a PXE-based Discovery service that uses DHCP and TFTP services. You discover unknown nodes by booting them into the Discovery kernel and initial RAM disk images from Satellite Server or Capsule Server. When a discovered node is scheduled for installation, it reboots and continues with the configured PXE-based host provisioning. Figure 6.1. Discovery workflow in PXE mode 6.3.1. Setting Discovery as the default PXE boot option Set the Discovery service as the default service that boots for hosts that are not present in your current Satellite inventory. When you start an unknown host in PXE mode, Satellite Server or Capsule Server provides a boot menu with a default boot option. The boot menu has two basic options: local and discovery . The default setting of the global PXE templates is to select local to boot the host from the local hard drive. Change the setting to select discovery to boot from the Discovery image. Prerequisites Your Satellite account has the view_settings , edit_settings , and view_provisioning_templates permissions. Procedure In the Satellite web UI, navigate to Administer > Settings . On the Provisioning tab, enter discovery in the Default PXE global template entry field. Navigate to Hosts > Templates > Provisioning Templates . Click Build PXE Default . The boot menus are built as the following files: /var/lib/tftpboot/pxelinux.cfg/default /var/lib/tftpboot/grub2/grub.cfg Satellite propagates the default boot menus to all TFTP Capsules. 6.3.2. Performing Discovery in PXE mode Discovery in PXE mode uses the Discovery PXE boot images and runs unattended. Prerequisites You have installed the Discovery service and image. For more information, see Section 6.2, "Installing the Discovery service" . You have set Discovery as the default booting option. For more information, see Section 6.3.1, "Setting Discovery as the default PXE boot option" . Procedure Power on or reboot your host. After a few minutes, the Discovery image completes booting and the host displays a status screen. Verification Satellite web UI displays a notification about a new discovered host. steps In the Satellite web UI, navigate to Hosts > Discovered Hosts and view the newly discovered host. For more information about provisioning discovered hosts, see Section 6.6, "Creating hosts from discovered hosts" . 6.3.3. Customizing the Discovery PXE boot Satellite builds PXE boot menus from the following global provisioning templates: PXELinux global default for BIOS provisioning. PXEGrub global default and PXEGrub2 global default for UEFI provisioning. The PXE boot menus are available on Satellite Server and Capsules that have TFTP enabled. The Discovery menu item uses a Linux kernel for the operating system and passes kernel parameters to configure the Discovery service. You can customize the passed kernel parameters by changing the following snippets: pxelinux_discovery : This snippet is included in the PXELinux global default template. This snippet renders the Discovery boot menu option. The KERNEL and APPEND options boot the Discovery kernel and initial RAM disk. The APPEND option contains kernel parameters. pxegrub_discovery : This snippet is included in the PXEGrub global default template. However, Discovery is not implemented for GRUB 1.x . pxegrub2_discovery : This snippet is included in the PXEGrub2 global default template. This snippet renders the Discovery GRUB2 menu entry. The common variable contains kernel parameters. For information about the kernel parameters, see Section 6.9, "Kernel parameters for Discovery customization" . Procedure In the Satellite web UI, navigate to Hosts > Templates > Provisioning Templates . Clone and edit the snippet you want to customize. For more information, see Section 2.15, "Cloning provisioning templates" . Clone and edit the template that contains the original snippet. Include your custom snippet instead of the original snippet. For more information, see Section 2.15, "Cloning provisioning templates" . Navigate to Administer > Settings . Click the Provisioning tab. In the appropriate Global default PXE\ template* setting, select your custom template. Navigate to Hosts > Templates > Provisioning Templates . Click Build PXE Default . This refreshes the default PXE boot menus on Satellite Server and any TFTP Capsules. 6.3.4. Discovering hosts from multiple Capsule Servers Satellite deploys the same template to all TFTP Capsules and there is no variable or macro available to render the host name of Capsule. The hard-coded proxy.url does not work with two or more TFTP Capsules. As a workaround, every time you click Build PXE Defaults , edit the configuration file in the TFTP directory on all Capsule Servers by using SSH, or use a common DNS alias for appropriate subnets. To use Capsule Server to proxy the Discovery steps, edit /var/lib/tftpboot/pxelinux.cfg/default or /var/lib/tftpboot/grub2/grub.cfg , and change the URL to the Capsule Server FQDN you want to use. 6.4. Discovery in PXE-less mode Satellite provides a PXE-less Discovery service for environments without DHCP and TFTP services. You discover unknown nodes by using the Discovery ISO from Satellite Server. When a discovered node is scheduled for installation, the kexec command reloads a Linux kernel with an operating system installer without rebooting the node. Important Discovery kexec is a Technology Preview feature only. Technology Preview features are not supported with Red Hat production service level agreements (SLAs) and might not be functionally complete. Red Hat does not recommend using them in production. These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process. For more information, see Technology Preview Features - Scope of Support . Known issues The console might freeze during the process. On some hardware, you might experience graphical hardware problems. Figure 6.2. Discovery workflow in PXE-less mode 6.4.1. Performing Discovery in PXE-less mode Discovery in PXE-less mode uses the Discovery ISO and requires you to attend to the process. Prerequisites You have installed the Foreman Discovery image. For more information, see Section 6.2, "Installing the Discovery service" . Procedure Copy the Discovery ISO to a CD, DVD, or a USB flash drive. For example, to copy to a USB drive at /dev/sdb : Insert the Discovery boot media into a host, start the host, and boot from the media. The Discovery image displays options for either Manual network setup or Discovery with DHCP : Manual network setup : On the Primary interface screen, select the primary network interface that connects to Satellite Server or Capsule Server. Optionally, enter a VLAN ID . Hit Select to continue. On the Network configuration screen, enter the Address , Gateway , and DNS . Hit to continue. Discovery with DHCP : On the Primary interface screen, select the primary network interface that connects to Satellite Server or Capsule Server. Optionally, enter a VLAN ID . Hit Select to continue. The Discovery image attempts to automatically configure the network interface by using a DHCP server, such as one that a Capsule Server provides. On the Credentials screen, enter the following options: In the Server URL field, enter the URL of Satellite Server or Discovery Capsule Server. If you refer to a Capsule Server, include the Capsule port number. In the Connection type field, select the connection type: Server for Satellite Server or Foreman Proxy for Capsule Server. Hit to continue. Optional: On the Custom facts screen, enter custom facts for the Facter tool to relay back to Satellite Server. Enter a name and value for each custom fact you need. Hit Confirm to proceed. Verification Satellite web UI displays a notification about a new discovered host. steps In the Satellite web UI, navigate to Hosts > Discovered Hosts and view the newly discovered host. For more information about provisioning discovered hosts, see Section 6.6, "Creating hosts from discovered hosts" . 6.4.2. Customizing the Discovery ISO You can create a customized Discovery ISO to automate the image configuration process after booting. The Discovery image uses a Linux kernel for the operating system, which passes kernel parameters to configure the Discovery service. Satellite Server provides the discovery-remaster tool in the foreman-discovery-image package. By using this tool, remaster the image to include custom kernel parameters. Procedure Run the discovery-remaster tool. Enter the kernel parameters as a single string. For example: For more information about kernel parameters, see Section 6.9, "Kernel parameters for Discovery customization" . Copy the new ISO to either a CD, DVD, or a USB stick. For example, to copy to a USB stick at /dev/sdb : steps Insert the Discovery boot medium into a bare metal host, start the host, and boot from the medium. For more information about provisioning discovered hosts, see Section 6.6, "Creating hosts from discovered hosts" . 6.5. Automatic contexts for discovered hosts Satellite Server assigns an organization and location to discovered hosts automatically according to the following sequence of rules: If a discovered host uses a subnet defined in Satellite, the host uses the first organization and location associated with the subnet. If the default location and organization is configured in global settings, the discovered hosts are placed in this organization and location. To configure these settings, navigate to Administer > Settings > Discovery and select values for the Discovery organization and Discovery location settings. Ensure that the subnet of discovered host also belongs to the selected organization and location, otherwise Satellite refuses to set it for security reasons. If none of the conditions is met, Satellite assigns the first organization and location ordered by name. You can change the organization or location manually by using the bulk actions on the Discovered Hosts page. Select the discovered hosts to modify and, from the Select Action menu, select Assign Organization or Assign Location . 6.6. Creating hosts from discovered hosts Provisioning discovered hosts follows a provisioning process that is similar to PXE provisioning. The main difference is that instead of manually entering the host's MAC address, you can select the host to provision from the list of discovered hosts. To use the CLI instead of the Satellite web UI, see the CLI procedure . Prerequisites Configure a domain and subnet on Satellite. For more information about networking requirements, see Chapter 3, Configuring networking . You have one or more discovered hosts in your Satellite inventory. You can use synchronized content repositories for Red Hat Enterprise Linux. For more information, see Syncing Repositories in Managing content . Provide an activation key for host registration. For more information, see Creating An Activation Key in Managing content . You have associated a Discovery kexec -kind template and provisioning -kind template with the operating system. For more information, see Section 2.18, "Associating templates with operating systems" . For information about the security tokens, see Section 4.2, "Configuring the security token validity duration" . Procedure In the Satellite web UI, navigate to Hosts > Discovered hosts . Select the host you want to provision and click Provision to the right of the list. Select one of the following options: To provision a host from a host group, select a host group, organization, and location, and then click Create Host . To provision a host with further customization, click Customize Host and enter the additional details you want to specify for the new host. Verify that the fields are populated with values. Note in particular: The Name from the Host tab becomes the DNS name . Satellite Server automatically assigns an IP address for the new host. Satellite Server automatically populates the MAC address from the Discovery results. Ensure that Satellite Server automatically selects the Managed , Primary , and Provision options for the first interface on the host. If not, select them. Click the Operating System tab, and verify that all fields contain values. Confirm each aspect of the operating system. In Provisioning templates , click Resolve to check if the new host can identify the correct provisioning templates. The host must resolve to the following provisioning templates: Discovery kexec : Discovery Red Hat kexec Provisioning template : Kickstart default Click Submit to save the host details. When the host provisioning is complete, the discovered host moves to Hosts > All Hosts . CLI procedure Identify the discovered host to provision: Select the host and provision it by using a host group. Set a new host name with the --new-name option: This removes the host from the discovered host listing and creates a host entry with the provisioning settings. The Discovery image automatically reboots the host to PXE or initiates kernel execution. The host detects the DHCP service and starts installing the operating system. The rest of the process is identical to the normal PXE workflow described in Section 4.3, "Creating hosts with unattended provisioning" . 6.7. Creating Discovery rules As a method of automating the provisioning process for discovered hosts, Satellite provides a feature to create Discovery rules. These rules define how discovered hosts automatically provision themselves, based on the assigned host group. For example, you can automatically provision hosts with a high CPU count as hypervisors. Likewise, you can provision hosts with large hard disks as storage servers. To use the CLI instead of the Satellite web UI, see the CLI procedure . NIC considerations Auto provisioning does not currently allow configuring network interface cards (NICs). All systems are being provisioned with the NIC configuration that was detected during discovery. However, you can set the NIC in the kickstart scriptlet, by using a script, or by using configuration management at a later stage. Procedure In the Satellite web UI, navigate to Configure > Discovery rules , and select Create Rule . In the Name field, enter a name for the rule. In the Search field, enter the rules to determine whether to provision a host. This field provides suggestions for values you enter and allows operators for multiple rules. For example: cpu_count > 8 . From the Host Group list, select the host group to use as a template for this host. In the Hostname field, enter the pattern to determine host names for multiple hosts. This uses the same ERB syntax that provisioning templates use. The host name can use the @host attribute for host-specific values and the rand macro for a random number or the sequence_hostgroup_param_next macro for incrementing the value. For more information about provisioning templates, see Section 2.12, "Provisioning templates" and the API documentation. myhost-<%= sequence_hostgroup_param_next("EL7/MyHostgroup", 10, "discovery_host") %> myhost-<%= rand(99999) %> abc-<%= @host.facts['bios_vendor'] %>-<%= rand(99999) %> xyz-<%= @host.hostgroup.name %> srv-<%= @host.discovery_rule.name %> server-<%= @host.ip.gsub('.','-') + '-' + @host.hostgroup.subnet.name %> When creating host name patterns, ensure that the resulting host names are unique, do not start with numbers, and do not contain underscores or dots. A good approach is to use unique information provided by Facter, such as the MAC address, BIOS, or serial ID. In the Hosts limit field, enter the maximum number of hosts that you can provision with the rule. Enter 0 for unlimited. In the Priority field, enter a number to set the precedence the rule has over other rules. Rules with lower values have a higher priority. From the Enabled list, select whether you want to enable the rule. To set a different provisioning context for the rule, click the Organizations and Locations tabs and select the contexts you want to use. Click Submit to save your rule. In the Satellite web UI, navigate to Hosts > Discovered Host and select one of the following two options: From the Discovered hosts list on the right, select Auto-Provision to automatically provisions a single host. On the upper right of the window, click Auto-Provision All to automatically provisions all hosts. CLI procedure Create the rule by using Hammer: Automatically provision a host with the hammer discovery auto-provision command: 6.8. Extending the Discovery image You can extend the Satellite Discovery image with custom facts, software, or device drivers. You can also provide a compressed archive file containing extra code for the image to use. Procedure Create the following directory structure: The autostart.d directory contains scripts that are executed in POSIX order by the Discovery kernel when it starts but before the host is registered to Satellite. The bin directory is added to the USDPATH variable; you can place binary files in this directory and use them in the autostart scripts. The facts directory is added to the FACTERLIB variable so that custom facts can be configured and sent to Satellite. The lib directory is added to the LD_LIBRARY_PATH variable and lib/ruby is added to the RUBYLIB variable, so that binary files in /bin can be executed correctly. After creating the directory structure, create a .zip file archive with the following command: Inform the Discovery kernel of the extensions it must use. Place your zip files on your TFTP server with the Discovery image and customize the Discovery PXE boot with the fdi.zips parameter where the paths are relative to the TFTP root. For example, if you have two archives at USDTFTP/zip1.zip and USDTFTP/boot/zip2.zip , use the following syntax: For more information, see Section 6.3.3, "Customizing the Discovery PXE boot" . You can append new directives and options to the existing environment variables ( PATH , LD_LIBRARY_PATH , RUBYLIB and FACTERLIB ). If you want to specify the path explicitly in your scripts, the .zip file contents are extracted to the /opt/extension directory on the image. You can create multiple .zip files but be aware that they are extracted to the same location on the Discovery image. Files extracted from in later .zip files overwrite earlier versions if they contain the same file name. 6.9. Kernel parameters for Discovery customization Discovery uses a Linux kernel for the operating system and passes kernel parameters to configure the Discovery service. These kernel parameters include the following entries: fdi.cachefacts Number of fact uploads without caching. By default, Satellite does not cache any uploaded facts. fdi.countdown Number of seconds to wait until the text-user interface is refreshed after the initial discovery attempt. This value defaults to 45 seconds. Increase this value if the status page reports the IP address as N/A . fdi.dhcp_timeout NetworkManager DHCP timeout. The default value is 300 seconds. fdi.dns_nameserver Nameserver to use for DNS SRV record. fdi.dns_ndots ndots option to use for DNS SRV record. fdi.dns_search Search domain to use for DNS SRV record. fdi.initnet By default, the image initializes all network interfaces (value all ). When this setting is set to bootif , only the network interface it was network-booted from will be initialized. fdi.ipv4.method By default, NetworkManager IPv4 method setting is set to auto . This option overrides it, set it to ignore to disable the IPv4 stack. This option works only in DHCP mode. fdi.ipv6.method By default, NetworkManager IPv6 method setting is set to auto . This option overrides it, set it to ignore to disable the IPv6 stack. This option only works in DHCP mode. fdi.ipwait Duration in seconds to wait for IP to be available in HTTP proxy SSL cert start. By default, Satellite waits for 120 seconds. fdi.nmwait nmcli -wait option for NetworkManager. By default, nmcli waits for 120 seconds. fdi.proxy_cert_days Number of days the self-signed HTTPS cert is valid for. By default, the certificate is valid for 999 days. fdi.pxauto To set automatic or semi-automatic mode. If set to 0, the image uses semi-automatic mode, which allows you to confirm your choices through a set of dialog options. If set to 1, the image uses automatic mode and proceeds without any confirmation. fdi.pxfactname1, fdi.pxfactname2 ... fdi.pxfactnameN Use to specify custom fact names. fdi.pxfactvalue1, fdi.pxfactvalue2 ... fdi.pxfactvalueN The values for each custom fact. Each value corresponds to a fact name. For example, fdi.pxfactvalue1 sets the value for the fact named with fdi.pxfactname1 . fdi.pxip, fdi.pxgw, fdi.pxdns Manually configures IP address ( fdi.pxip ), the gateway ( fdi.pxgw ), and the DNS ( fdi.pxdns ) for the primary network interface. If you omit these parameters, the image uses DHCP to configure the network interface. You can add multiple DNS entries in a comma-separated [1] list, for example fdi.pxdns=192.168.1.1,192.168.200.1 . fdi.pxmac The MAC address of the primary interface in the format of AA:BB:CC:DD:EE:FF . This is the interface you aim to use for communicating with Capsule Server. In automated mode, the first NIC (using network identifiers in alphabetical order) with a link is used. In semi-automated mode, a screen appears and requests you to select the correct interface. fdi.rootpw By default, the root account is locked. Use this option to set a root password. You can enter both clear and encrypted passwords. fdi.ssh By default, the SSH service is disabled. Set this to 1 or true to enable SSH access. fdi.uploadsleep Duration in seconds between facter runs. By default, facter runs every 30 seconds. fdi.vlan.primary VLAN tagging ID to set for the primary interface. If you want to use tagged VLAN provisioning and you want the Discovery service to send a discovery request, add the following parameter to the Discovery snippet: fdi.zips Filenames with extensions to be downloaded and started during boot. For more information, see Section 6.8, "Extending the Discovery image" . fdi.zipserver TFTP server to use to download extensions from. For more information, see Section 6.8, "Extending the Discovery image" . net.ifnames and biosdevname Because network interface names are not expected to always be the same between major versions of Red Hat Enterprise Linux, hosts can be created with incorrect network configurations. You can disable the new naming scheme by a kernel command line parameter: For Dell servers, use the biosdevname=1 parameter. For other hardware or virtual machines, use the net.ifnames=1 parameter. proxy.type The proxy type. By default, this parameter is set to foreman , where communication goes directly to Satellite Server. Set this parameter to proxy if you point to Capsule in proxy.url . proxy.url The URL of the server providing the Discovery service. By default, this parameter contains the foreman_server_url macro as its argument. This macro resolves to the full URL of Satellite Server. There is no macro for a Capsule URL. You have to set a Capsule explicitly. For example: You can use an IP address or FQDN in this parameter. Add a SSL port number if you point to Capsule. 6.10. Troubleshooting Discovery If a machine is not listed in the Satellite web UI in Hosts > Discovered Hosts , it means that Discovery has failed. Inspect the following configuration areas to help isolate the problem: Inspecting prerequisites Ensure that your Satellite and hosts meet the requirements. For more information, see Section 6.1, "Prerequisites for using Discovery" . Inspecting problems on Satellite Ensure you have set Discovery for booting and built the PXE boot configuration files. For more information, see Section 6.3.1, "Setting Discovery as the default PXE boot option" . Verify that these configuration files are present on your TFTP Capsule and have discovery set as the default boot option: /var/lib/tftpboot/pxelinux.cfg/default /var/lib/tftpboot/grub2/grub.cfg Verify that the values of the proxy.url and proxy.type options in the PXE Discovery snippet you are using. The default snippets are named pxelinux_discovery , pxegrub_discovery , or pxegrub2_discovery . Inspecting problems with networking Ensure adequate network connectivity between hosts, Capsule Server, and Satellite Server. Ensure that the DHCP server provides IP addresses to the booted Discovery image correctly. Ensure that DNS is working correctly for the discovered hosts or use an IP address in the proxy.url option in the PXE Discovery snippet included in the PXE template you are using. Inspecting problems on the host If the host boots into the Discovery image but Discovery is not successful, enable the root account and SSH access on the Discovery image. You can enable SSH and set the root password by using the following Discovery kernel options: Using TTY2 or higher, log in to a Discovery-booted host to review system logs. For example, these logs are useful for troubleshooting: discover-host Initial facts upload foreman-discovery Facts refresh, reboot remote commands nm-prepare Boot script which pre-configures NetworkManager NetworkManager Networking information For gathering important system facts, use the discovery-debug command on the Discovery-booted host. It prints out system logs, network configuration, list of facts, and other information on the standard output. You can redirect this output to a file and copy it with the scp command for further investigation. Additional resources For more information about changing the Discovery kernel options, see the following resources: Section 6.3.3, "Customizing the Discovery PXE boot" Section 6.4.2, "Customizing the Discovery ISO" [1] NetworkManager expects ; as a list separator but currently also accepts , . For more information, see man nm-settings-keyfile and Shell-like scripting in GRUB | [
"satellite-maintain packages install foreman-discovery-image",
"satellite-installer --enable-foreman-proxy-plugin-discovery",
"satellite-maintain packages install foreman-discovery-image",
"dd bs=4M if=/usr/share/foreman-discovery-image/foreman-discovery-image- version .iso of=/dev/sdb",
"discovery-remaster ~/iso/foreman-discovery-image- version .iso \"fdi.pxip=192.168.140.20/24 fdi.pxgw=192.168.140.1 fdi.pxdns=192.168.140.2 proxy.url=https:// satellite.example.com :9090 proxy.type=proxy fdi.pxfactname1= My_Custom_Hostname fdi.pxfactvalue1= My_Host fdi.pxmac=52:54:00:be:8e:8c fdi.pxauto=1\"",
"dd bs=4M if=/usr/share/foreman-discovery-image/foreman-discovery-image- version .iso of=/dev/sdb",
"hammer discovery list",
"hammer discovery provision --build true --enabled true --hostgroup \" My_Host_Group \" --location \" My_Location \" --managed true --name \" My_Host_Name \" --new-name \" My_New_Host_Name \" --organization \" My_Organization \"",
"hammer discovery-rule create --enabled true --hostgroup \" My_Host_Group \" --hostname \"hypervisor-<%= rand(99999) %>\" --hosts-limit 5 --name \" My_Hypervisor \" --priority 5 --search \"cpu_count > 8\"",
"hammer discovery auto-provision --name \"macabcdef123456\"",
". ├── autostart.d │ └── 01_zip.sh ├── bin │ └── ntpdate ├── facts │ └── test.rb └── lib ├── libcrypto.so.1.0.0 └── ruby └── test.rb",
"zip -r my_extension.zip .",
"fdi.zips=zip1.zip,boot/zip2.zip",
"fdi.vlan.primary= My_VLAN_ID",
"proxy.url=https:// capsule.example.com :9090 proxy.type=proxy",
"fdi.ssh=1 fdi.rootpw= My_Password"
] | https://docs.redhat.com/en/documentation/red_hat_satellite/6.16/html/provisioning_hosts/discovering-hosts-on-a-network_provisioning |
Scalability and performance | Scalability and performance OpenShift Container Platform 4.15 Scaling your OpenShift Container Platform cluster and tuning performance in production environments Red Hat OpenShift Documentation Team | null | https://docs.redhat.com/en/documentation/openshift_container_platform/4.15/html/scalability_and_performance/index |
Chapter 1. Cluster lifecycle with multicluster engine operator overview | Chapter 1. Cluster lifecycle with multicluster engine operator overview The multicluster engine operator is the cluster lifecycle operator that provides cluster management capabilities for OpenShift Container Platform and Red Hat Advanced Cluster Management hub clusters. From the hub cluster, you can create and manage clusters, as well as destroy any clusters that you created. You can also hibernate, resume, and detach clusters. Learn more about the cluster lifecycle capabilities from the following documentation. Access the multicluster engine operator Support matrix to learn about hub cluster and managed cluster requirements and support. Notes: Your cluster is created by using the OpenShift Container Platform cluster installer with the Hive resource. You can find more information about the process of installing OpenShift Container Platform clusters at Installing and configuring OpenShift Container Platform clusters in the OpenShift Container Platform documentation. With your OpenShift Container Platform cluster, you can use multicluster engine operator as a standalone cluster manager for cluster lifecycle function, or you can use it as part of a Red Hat Advanced Cluster Management hub cluster. If you are using OpenShift Container Platform only, the operator is included with subscription. Visit About multicluster engine for Kubernetes operator from the OpenShift Container Platform documentation. If you subscribe to Red Hat Advanced Cluster Management, you also receive the operator with installation. You can create, manage, and monitor other Kubernetes clusters with the Red Hat Advanced Cluster Management hub cluster. See the Red Hat Advanced Cluster Management Installing and upgrading documentation. Release images are the version of OpenShift Container Platform that you use when you create a cluster. For clusters that are created using Red Hat Advanced Cluster Management, you can enable automatic upgrading of your release images. For more information about release images in Red Hat Advanced Cluster Management, see Release images . About cluster lifecycle with multicluster engine operator Release notes Installing and upgrading multicluster engine operator Managing credentials Cluster lifecycle introduction Discovery service introduction Hosted control planes APIs Troubleshooting The components of the cluster lifecycle management architecture are included in the Cluster lifecycle architecture . 1.1. Release notes Learn about new features, bug fixes, and more for cluster lifecycle and the 2.6 version of multicluster engine operator. Deprecated: multicluster engine operator 2.3 and earlier versions are no longer supported. The documentation might remain available, but without any Errata or other updates. Best practice: Upgrade to the most recent version. What's new in multicluster engine operator Errata updates Cluster lifecycle known issues Deprecations and removals If you experience issues with one of the currently supported releases, or the product documentation, go to Red Hat Support where you can troubleshoot, view Knowledgebase articles, connect with the Support Team, or open a case. You must log in with your credentials. You can also learn more about the Customer Portal documentation at Red Hat Customer Portal FAQ . The documentation references the earliest supported OpenShift Container Platform version, unless a specific component or function is introduced and tested only on a more recent version of OpenShift Container Platform. For full support information, see the multicluster engine operator Support matrix . For lifecycle information, see Red Hat OpenShift Container Platform Life Cycle policy . 1.1.1. What's new in Cluster lifecycle with the multicluster engine operator Learn about new features for creating, importing, managing, and destroying Kubernetes clusters across various infrastructure cloud providers, private clouds, and on-premises data centers. Important: Cluster lifecycle now supports all providers that are certified through the Cloud Native Computing Foundation (CNCF) Kubernetes Conformance Program. Choose a vendor that is recognized by CNFC for your hybrid cloud multicluster management. See the following information about using CNFC providers: Learn how CNFC providers are certified at Certified Kubernetes Conformance . For Red Hat support information about CNFC third-party providers, see Red Hat support with third party components , or Contact Red Hat support . If you bring your own CNFC conformance certified cluster, you need to change the OpenShift Container Platform CLI oc command to the Kubernetes CLI command, kubectl . 1.1.1.1. New features and enhancements for components Learn more about new features for specific components. Note: Some features and components are identified and released as Technology Preview . Cluster lifecycle Hosted control planes Red Hat Advanced Cluster Management integration 1.1.1.2. Cluster lifecycle Learn about new features and enhancements for Cluster lifecycle with multicluster engine operator. You can now push custom labels to the automation job pod created by the ClusterCurator . See Pushing custom labels from the ClusterCurator resource to the automation job pod to learn more. You can now automatically apply configurations in the global KlusterletConfig on every managed cluster, without binding a managed cluster to a KlusterletConfig with an annotation. To learn more, see Configuring the global KlusterletConfig . PlacementDecision now works more efficiently by excluding managed clusters that you are deleting. To learn more about placements, see Placement overview . You can now enable a discovered cluster to automatically import supported clusters into your hub cluster with the discovery-operator . Import clusters for faster cluster management. See Enabling a discovered cluster for management You can use the ClusterCurator resource to perform easier, automatic upgrades from Extended Update Support (EUS) to EUS. The spec.upgrade.intermediateUpdate is added to the ClusterCurator resource with the intermediate release value before the final upgrade to the desiredUpdate . Find the procedure and the example at Using the ClusterCurator for Extended Update Support (EUS) upgrades . You can now use the Infrastructure Operator for Red Hat OpenShift to install a cluster in FIPS mode. See Installing a FIPS-enabled cluster by using the Infrastructure Operator for Red Hat OpenShift . The local-cluster is now imported automatically if you have both an AgentServiceConfig and ManagedCluster custom resource with the necessary annotations. 1.1.1.3. Hosted control planes Starting with OpenShift Container Platform 4.16, hosted control planes supports the user-provisioned installation and attachment of logical partition (LPAR) as compute nodes on IBM Z and IBM LinuxOne. To learn more, see Adding IBM Z LPAR as agents . Configuring hosted control plane clusters on AWS is now generally available. You can deploy the HyperShift Operator on an existing managed cluster by using the hypershift-addon managed cluster add-on to enable that cluster as a hosting cluster and start to create the hosted cluster. See Configuring hosted control plane clusters on AWS for details. The --sts-creds and --role-arn flags replace the deprecated --aws-creds flag in the hcp command line interface. Create an Amazon Web Services (AWS) Identity and Access Management (IAM) role and Security Token Service (STS) credentials to use the --sts-creds and --role-arn flags. For more information, see Creating an AWS IAM role and STS credentials . 1.1.1.4. Red Hat Advanced Cluster Management integration You can now automate the import of OpenShift Service on AWS clusters by using Red Hat Advanced Cluster Management policy enforcement with multicluster engine operator for faster cluster management. Learn more at Automating import for discovered OpenShift Service on AWS clusters . You can now discover and automate the import of hosted clusters, as well. Configure Red Hat Advanced Cluster Management to import multicluster engine operator clusters and then use policy enforcement for automation. Learn more at Automating import for discovered hosted clusters . 1.1.2. Cluster lifecycle known issues Review the known issues for cluster lifecycle with multicluster engine operator. The following list contains known issues for this release, or known issues that continued from the release. For your OpenShift Container Platform cluster, see OpenShift Container Platform release notes . Cluster lifecycle Hosted control planes 1.1.2.1. Cluster management Cluster lifecycle known issues and limitations are part of the Cluster lifecycle with multicluster engine operator documentation. 1.1.2.1.1. Limitation with nmstate Develop quicker by configuring copy and paste features. To configure the copy-from-mac feature in the assisted-installer , you must add the mac-address to the nmstate definition interface and the mac-mapping interface. The mac-mapping interface is provided outside the nmstate definition interface. As a result, you must provide the same mac-address twice. 1.1.2.1.2. Prehook failure does not fail the hosted cluster creation If you use the automation template for the hosted cluster creation and the prehook job fails, then it looks like the hosted cluster creation is still progressing. This is normal because the hosted cluster was designed with no complete failure state, and therefore, it keeps trying to create the cluster. 1.1.2.1.3. Manual removal of the VolSync CSV required on managed cluster when removing the add-on When you remove the VolSync ManagedClusterAddOn from the hub cluster, it removes the VolSync operator subscription on the managed cluster but does not remove the cluster service version (CSV). To remove the CSV from the managed clusters, run the following command on each managed cluster from which you are removing VolSync: If you have a different version of VolSync installed, replace v0.6.0 with your installed version. 1.1.2.1.4. Deleting a managed cluster set does not automatically remove its label After you delete a ManagedClusterSet , the label that is added to each managed cluster that associates the cluster to the cluster set is not automatically removed. Manually remove the label from each of the managed clusters that were included in the deleted managed cluster set. The label resembles the following example: cluster.open-cluster-management.io/clusterset:<ManagedClusterSet Name> . 1.1.2.1.5. ClusterClaim error If you create a Hive ClusterClaim against a ClusterPool and manually set the ClusterClaimspec lifetime field to an invalid golang time value, the product stops fulfilling and reconciling all ClusterClaims , not just the malformed claim. You see the following error in the clusterclaim-controller pod logs, which is a specific example with the PoolName and invalid lifetime included: You can delete the invalid claim. If the malformed claim is deleted, claims begin successfully reconciling again without any further interaction. 1.1.2.1.6. The product channel out of sync with provisioned cluster The clusterimageset is in fast channel, but the provisioned cluster is in stable channel. Currently the product does not sync the channel to the provisioned OpenShift Container Platform cluster. Change to the right channel in the OpenShift Container Platform console. Click Administration > Cluster Settings > Details Channel . 1.1.2.1.7. Restoring the connection of a managed cluster with custom CA certificates to its restored hub cluster might fail After you restore the backup of a hub cluster that managed a cluster with custom CA certificates, the connection between the managed cluster and the hub cluster might fail. This is because the CA certificate was not backed up on the restored hub cluster. To restore the connection, copy the custom CA certificate information that is in the namespace of your managed cluster to the <managed_cluster>-admin-kubeconfig secret on the restored hub cluster. Tip: If you copy this CA certificate to the hub cluster before creating the backup copy, the backup copy includes the secret information. When the backup copy is used to restore in the future, the connection between the hub and managed clusters will automatically complete. 1.1.2.1.8. The local-cluster might not be automatically recreated If the local-cluster is deleted while disableHubSelfManagement is set to false , the local-cluster is recreated by the MulticlusterHub operator. After you detach a local-cluster, the local-cluster might not be automatically recreated. To resolve this issue, modify a resource that is watched by the MulticlusterHub operator. See the following example: To properly detach the local-cluster, set the disableHubSelfManagement to true in the MultiClusterHub . 1.1.2.1.9. Selecting a subnet is required when creating an on-premises cluster When you create an on-premises cluster using the console, you must select an available subnet for your cluster. It is not marked as a required field. 1.1.2.1.10. Cluster provisioning with Infrastructure Operator fails When creating OpenShift Container Platform clusters using the Infrastructure Operator, the file name of the ISO image might be too long. The long image name causes the image provisioning and the cluster provisioning to fail. To determine if this is the problem, complete the following steps: View the bare metal host information for the cluster that you are provisioning by running the following command: Run the describe command to view the error information: An error similar to the following example indicates that the length of the filename is the problem: If this problem occurs, it is typically on the following versions of OpenShift Container Platform, because the infrastructure operator was not using image service: 4.8.17 and earlier 4.9.6 and earlier To avoid this error, upgrade your OpenShift Container Platform to version 4.8.18 or later, or 4.9.7 or later. 1.1.2.1.11. Cannot use host inventory to boot with the discovery image and add hosts automatically You cannot use a host inventory, or InfraEnv custom resource, to both boot with the discovery image and add hosts automatically. If you used your InfraEnv resource for the BareMetalHost resource, and you want to boot the image yourself, you can work around the issue by creating a new InfraEnv resource. 1.1.2.1.12. Local-cluster status offline after reimporting with a different name When you accidentally try to reimport the cluster named local-cluster as a cluster with a different name, the status for local-cluster and for the reimported cluster display offline . To recover from this case, complete the following steps: Run the following command on the hub cluster to edit the setting for self-management of the hub cluster temporarily: Add the setting spec.disableSelfManagement=true . Run the following command on the hub cluster to delete and redeploy the local-cluster: Enter the following command to remove the local-cluster management setting: Remove spec.disableSelfManagement=true that you previously added. 1.1.2.1.13. Cluster provision with Ansible automation fails in proxy environment An Automation template that is configured to automatically provision a managed cluster might fail when both of the following conditions are met: The hub cluster has cluster-wide proxy enabled. The Ansible Automation Platform can only be reached through the proxy. 1.1.2.1.14. Cannot delete managed cluster namespace manually You cannot delete the namespace of a managed cluster manually. The managed cluster namespace is automatically deleted after the managed cluster is detached. If you delete the managed cluster namespace manually before the managed cluster is detached, the managed cluster shows a continuous terminating status after you delete the managed cluster. To delete this terminating managed cluster, manually remove the finalizers from the managed cluster that you detached. 1.1.2.1.15. Hub cluster and managed clusters clock not synced Hub cluster and manage cluster time might become out-of-sync, displaying in the console unknown and eventually available within a few minutes. Ensure that the OpenShift Container Platform hub cluster time is configured correctly. See Customizing nodes . 1.1.2.1.16. Importing certain versions of IBM OpenShift Container Platform Kubernetes Service clusters is not supported You cannot import IBM OpenShift Container Platform Kubernetes Service version 3.11 clusters. Later versions of IBM OpenShift Kubernetes Service are supported. 1.1.2.1.17. Automatic secret updates for provisioned clusters is not supported When you change your cloud provider access key on the cloud provider side, you also need to update the corresponding credential for this cloud provider on the console of multicluster engine operator. This is required when your credentials expire on the cloud provider where the managed cluster is hosted and you try to delete the managed cluster. 1.1.2.1.18. Node information from the managed cluster cannot be viewed in search Search maps RBAC for resources in the hub cluster. Depending on user RBAC settings, users might not see node data from the managed cluster. Results from search might be different from what is displayed on the Nodes page for a cluster. 1.1.2.1.19. Process to destroy a cluster does not complete When you destroy a managed cluster, the status continues to display Destroying after one hour, and the cluster is not destroyed. To resolve this issue complete the following steps: Manually ensure that there are no orphaned resources on your cloud, and that all of the provider resources that are associated with the managed cluster are cleaned up. Open the ClusterDeployment information for the managed cluster that is being removed by entering the following command: Replace mycluster with the name of the managed cluster that you are destroying. Replace namespace with the namespace of the managed cluster. Remove the hive.openshift.io/deprovision finalizer to forcefully stop the process that is trying to clean up the cluster resources in the cloud. Save your changes and verify that ClusterDeployment is gone. Manually remove the namespace of the managed cluster by running the following command: Replace namespace with the namespace of the managed cluster. 1.1.2.1.20. Cannot upgrade OpenShift Container Platform managed clusters on OpenShift Container Platform Dedicated with the console You cannot use the Red Hat Advanced Cluster Management console to upgrade OpenShift Container Platform managed clusters that are in the OpenShift Container Platform Dedicated environment. 1.1.2.1.21. Work manager add-on search details The search details page for a certain resource on a certain managed cluster might fail. You must ensure that the work-manager add-on in the managed cluster is in Available status before you can search. 1.1.2.1.22. Non-OpenShift Container Platform managed clusters require ManagedServiceAccount or LoadBalancer for pod logs The ManagedServiceAccount and cluster proxy add-ons are enabled by default in Red Hat Advanced Cluster Management version 2.10 and newer. If the add-ons are disabled after upgrading, you must enable the ManagedServiceAccount and cluster proxy add-ons manually to use the pod log feature on non-OpenShift Container Platform managed clusters. See ManagedServiceAccount add-on to learn how to enable ManagedServiceAccount and see Using cluster proxy add-ons to learn how to enable a cluster proxy add-on. 1.1.2.1.23. OpenShift Container Platform 4.10.z does not support hosted control plane clusters with proxy configuration When you create a hosting service cluster with a cluster-wide proxy configuration on OpenShift Container Platform 4.10.z, the nodeip-configuration.service service does not start on the worker nodes. 1.1.2.1.24. Cannot provision OpenShift Container Platform 4.11 cluster on Azure Provisioning an OpenShift Container Platform 4.11 cluster on Azure fails due to an authentication operator timeout error. To work around the issue, use a different worker node type in the install-config.yaml file or set the vmNetworkingType parameter to Basic . See the following install-config.yaml example: compute: - hyperthreading: Enabled name: 'worker' replicas: 3 platform: azure: type: Standard_D2s_v3 osDisk: diskSizeGB: 128 vmNetworkingType: 'Basic' 1.1.2.1.25. Client cannot reach iPXE script iPXE is an open source network boot firmware. See iPXE for more details. When booting a node, the URL length limitation in some DHCP servers cuts off the ipxeScript URL in the InfraEnv custom resource definition, resulting in the following error message in the console: no bootable devices To work around the issue, complete the following steps: Apply the InfraEnv custom resource definition when using an assisted installation to expose the bootArtifacts , which might resemble the following file: Create a proxy server to expose the bootArtifacts with short URLs. Copy the bootArtifacts and add them them to the proxy by running the following commands: Add the ipxeScript artifact proxy URL to the bootp parameter in libvirt.xml . 1.1.2.1.26. Cannot delete ClusterDeployment after upgrading Red Hat Advanced Cluster Management If you are using the removed BareMetalAssets API in Red Hat Advanced Cluster Management 2.6, the ClusterDeployment cannot be deleted after upgrading to Red Hat Advanced Cluster Management 2.7 because the BareMetalAssets API is bound to the ClusterDeployment . To work around the issue, run the following command to remove the finalizers before upgrading to Red Hat Advanced Cluster Management 2.7: 1.1.2.1.27. A cluster deployed in a disconnected environment by using the central infrastructure management service might not install When you deploy a cluster in a disconnected environment by using the central infrastructure management service, the cluster nodes might not start installing. This issue occurs because the cluster uses a discovery ISO image that is created from the Red Hat Enterprise Linux CoreOS live ISO image that is shipped with OpenShift Container Platform versions 4.12.0 through 4.12.2. The image contains a restrictive /etc/containers/policy.json file that requires signatures for images sourcing from registry.redhat.io and registry.access.redhat.com . In a disconnected environment, the images that are mirrored might not have the signatures mirrored, which results in the image pull failing for cluster nodes at discovery. The Agent image fails to connect with the cluster nodes, which causes communication with the assisted service to fail. To work around this issue, apply an ignition override to the cluster that sets the /etc/containers/policy.json file to unrestrictive. The ignition override can be set in the InfraEnv custom resource definition. The following example shows an InfraEnv custom resource definition with the override: apiVersion: agent-install.openshift.io/v1beta1 kind: InfraEnv metadata: name: cluster namespace: cluster spec: ignitionConfigOverride: '{"ignition":{"version":"3.2.0"},"storage":{"files":[{"path":"/etc/containers/policy.json","mode":420,"overwrite":true,"contents":{"source":"data:text/plain;charset=utf-8;base64,ewogICAgImRlZmF1bHQiOiBbCiAgICAgICAgewogICAgICAgICAgICAidHlwZSI6ICJpbnNlY3VyZUFjY2VwdEFueXRoaW5nIgogICAgICAgIH0KICAgIF0sCiAgICAidHJhbnNwb3J0cyI6CiAgICAgICAgewogICAgICAgICAgICAiZG9ja2VyLWRhZW1vbiI6CiAgICAgICAgICAgICAgICB7CiAgICAgICAgICAgICAgICAgICAgIiI6IFt7InR5cGUiOiJpbnNlY3VyZUFjY2VwdEFueXRoaW5nIn1dCiAgICAgICAgICAgICAgICB9CiAgICAgICAgfQp9"}}]}}' The following example shows the unrestrictive file that is created: After this setting is changed, the clusters install. 1.1.2.1.28. Managed cluster stuck in Pending status after deployment The converged flow is the default process of provisioning. When you use the BareMetalHost resource for the Bare Metal Operator (BMO) to connect your host to a live ISO, the Ironic Python Agent does the following actions: It runs the steps in the Bare Metal installer-provisioned-infrastructure. It starts the Assisted Installer agent, and the agent handles the rest of the install and provisioning process. If the Assisted Installer agent starts slowly and you deploy a managed cluster, the managed cluster might become stuck in the Pending status and not have any agent resources. You can work around the issue by disabling the converged flow. Important: When you disable the converged flow, only the Assisted Installer agent runs in the live ISO, reducing the number of open ports and disabling any features you enabled with the Ironic Python Agent agent, including the following: Pre-provisioning disk cleaning iPXE boot firmware BIOS configuration To decide what port numbers you want to enable or disable without disabling the converged flow, see Network configuration . To disable the converged flow, complete the following steps: Create the following ConfigMap on the hub cluster: apiVersion: v1 kind: ConfigMap metadata: name: my-assisted-service-config namespace: multicluster-engine data: ALLOW_CONVERGED_FLOW: "false" 1 1 When you set the parameter value to "false", you also disable any features enabled by the Ironic Python Agent. Apply the ConfigMap by running the following command: 1.1.2.1.29. ManagedClusterSet API specification limitation The selectorType: LaberSelector setting is not supported when using the Clustersets API . The selectorType: ExclusiveClusterSetLabel setting is supported. 1.1.2.1.30. Hub cluster communication limitations The following limitations occur if the hub cluster is not able to reach or communicate with the managed cluster: You cannot create a new managed cluster by using the console. You are still able to import a managed cluster manually by using the command line interface or by using the Run import commands manually option in the console. If you deploy an Application or ApplicationSet by using the console, or if you import a managed cluster into ArgoCD, the hub cluster ArgoCD controller calls the managed cluster API server. You can use AppSub or the ArgoCD pull model to work around the issue. The console page for pod logs does not work, and an error message that resembles the following appears: 1.1.2.1.31. installNamespace field can only have one value When enabling the managed-serviceaccount add-on, the installNamespace field in the ManagedClusterAddOn resource must have open-cluster-management-agent-addon as the value. Other values are ignored. The managed-serviceaccount add-on agent is always deployed in the open-cluster-management-agent-addon namespace on the managed cluster. 1.1.2.1.32. tolerations and nodeSelector settings do not affect the managed-serviceaccount agent The tolerations and nodeSelector settings configured on the MultiClusterEngine and MultiClusterHub resources do not affect the managed-serviceaccount agent deployed on the local cluster. The managed-serviceaccount add-on is not always required on the local cluster. If the managed-serviceaccount add-on is required, you can work around the issue by completing the following steps: Create the addonDeploymentConfig custom resource. Set the tolerations and nodeSelector values for the local cluster and managed-serviceaccount agent. Update the managed-serviceaccount ManagedClusterAddon in the local cluster namespace to use the addonDeploymentConfig custom resource you created. See Configuring nodeSelectors and tolerations for klusterlet add-ons to learn more about how to use the addonDeploymentConfig custom resource to configure tolerations and nodeSelector for add-ons. 1.1.2.1.33. Bulk destroy option on KubeVirt hosted cluster does not destroy hosted cluster Using the bulk destroy option in the console on KubeVirt hosted clusters does not destroy the KubeVirt hosted clusters. Use the row action drop-down menu to destroy the KubeVirt hosted cluster instead. 1.1.2.1.34. The Cluster curator does not support OpenShift Container Platform Dedicated clusters When you upgrade an OpenShift Container Platform Dedicated cluster by using the ClusterCurator resource, the upgrade fails because the Cluster curator does not support OpenShift Container Platform Dedicated clusters. 1.1.2.1.35. Custom ingress domain is not applied correctly You can specify a custom ingress domain by using the ClusterDeployment resource while installing a managed cluster, but the change is only applied after the installation by using the SyncSet resource. As a result, the spec field in the clusterdeployment.yaml file displays the custom ingress domain you specified, but the status still displays the default domain. 1.1.2.1.36. A single-node OpenShift cluster installation requires a matching OpenShift Container Platform with infrastructure operator for Red Hat OpenShift If you want to install a single-node OpenShift cluster with an Red Hat OpenShift Container Platform version before 4.16, your InfraEnv custom resource and your booted host must use the same OpenShift Container Platform version that you are using to install the single-node OpenShift cluster. The installation fails if the versions do not match. To work around the issue, edit your InfraEnv resource before you boot a host with the Discovery ISO, and include the following content: apiVersion: agent-install.openshift.io/v1beta1 kind: InfraEnv spec: osImageVersion: 4.15 The osImageVersion field must match the Red Hat OpenShift Container Platform cluster version that you want to install. 1.1.2.2. Hosted control planes 1.1.2.2.1. Console displays hosted cluster as Pending import If the annotation and ManagedCluster name do not match, the console displays the cluster as Pending import . The cluster cannot be used by the multicluster engine operator. The same issue happens when there is no annotation and the ManagedCluster name does not match the Infra-ID value of the HostedCluster resource." 1.1.2.2.2. Console might list the same version multiple times when adding a node pool to a hosted cluster When you use the console to add a new node pool to an existing hosted cluster, the same version of OpenShift Container Platform might appear more than once in the list of options. You can select any instance in the list for the version that you want. 1.1.2.2.3. The web console lists nodes even after they are removed from the cluster and returned to the infrastructure environment When a node pool is scaled down to 0 workers, the list of hosts in the console still shows nodes in a Ready state. You can verify the number of nodes in two ways: In the console, go to the node pool and verify that it has 0 nodes. On the command line interface, run the following commands: Verify that 0 nodes are in the node pool by running the following command: oc get nodepool -A Verify that 0 nodes are in the cluster by running the following command: oc get nodes --kubeconfig Verify that 0 agents are reported as bound to the cluster by running the following command: oc get agents -A 1.1.2.2.4. Potential DNS issues in hosted clusters configured for a dual-stack network When you create a hosted cluster in an environment that uses the dual-stack network, you might encounter the following DNS-related issues: CrashLoopBackOff state in the service-ca-operator pod: When the pod tries to reach the Kubernetes API server through the hosted control plane, the pod cannot reach the server because the data plane proxy in the kube-system namespace cannot resolve the request. This issue occurs because in the HAProxy setup, the front end uses an IP address and the back end uses a DNS name that the pod cannot resolve. Pods stuck in ContainerCreating state: This issue occurs because the openshift-service-ca-operator cannot generate the metrics-tls secret that the DNS pods need for DNS resolution. As a result, the pods cannot resolve the Kubernetes API server. To resolve those issues, configure the DNS server settings by following the guidelines in Configuring DNS for a dual stack network . 1.1.2.2.5. IBM Z hosts restart in a loop In hosted control planes on the IBM Z platform, when you unbind the hosts with the cluster, the hosts restart in loop and are not ready to be used. For a workaround for this issue, see Destroying a hosted cluster on x86 bare metal with IBM Z compute nodes . 1.1.3. Errata updates For multicluster engine operator, the Errata updates are automatically applied when released. If no release notes are listed, the product does not have an Errata release at this time. Important: For reference, Jira links and Jira numbers might be added to the content and used internally. Links that require access might not be available for the user. 1.1.3.1. Errata 2.6.6 Delivers updates to one or more product container images. 1.1.3.2. Errata 2.6.5 Delivers updates to one or more product container images. 1.1.3.3. Errata 2.6.4 Delivers updates to one or more product container images. Stops the Detach clusters action from deleting hosted clusters. ( ACM-15058 ) Keeps the cluster-proxy-addon from getting stuck in the Progressing state. ( ACM-14853 ) 1.1.3.4. Errata 2.6.3 Delivers updates to one or more product container images. 1.1.3.5. Errata 2.6.2 Delivers updates to one or more product container images. Fixes an issue where validation prevented entering an AWS instance type containing hyphens during cluster creation. ( ACM-13036 ) 1.1.3.6. Errata 2.6.1 Delivers updates to one or more product container images. Fixes an issue where validation prevented entering an AWS instance type containing hyphens during cluster creation. ( ACM-13036 1.1.4. Cluster lifecycle deprecations and removals Learn when parts of the product are deprecated or removed from multicluster engine operator. Consider the alternative actions in the Recommended action and details, which display in the tables for the current release and for two prior releases. Tables are removed if no entries are added for that section this release. Deprecated: multicluster engine operator 2.3 and earlier versions are no longer supported. The documentation might remain available, but without any Errata or other updates. Best practice: Upgrade to the most recent version. 1.1.4.1. API deprecations and removals multicluster engine operator follows the Kubernetes deprecation guidelines for APIs. See the Kubernetes Deprecation Policy for more details about that policy. multicluster engine operator APIs are only deprecated or removed outside of the following timelines: All V1 APIs are generally available and supported for 12 months or three releases, whichever is greater. V1 APIs are not removed, but can be deprecated outside of that time limit. All beta APIs are generally available for nine months or three releases, whichever is greater. Beta APIs are not removed outside of that time limit. All alpha APIs are not required to be supported, but might be listed as deprecated or removed if it benefits users. 1.1.4.1.1. API deprecations Product or category Affected item Version Recommended action More details and links ManagedServiceAccount The v1alpha1 API is upgraded to v1beta1 because v1alpha1 is deprecated. 2.4 Use v1beta1 . None 1.1.4.2. Deprecations A deprecated component, feature, or service is supported, but no longer recommended for use and might become obsolete in future releases. Consider the alternative actions in the Recommended action and details that are provided in the following table: Product or category Affected item Version Recommended action More details and links Hosted control planes The --aws-creds flag is deprecated. 2.6 Use the --sts-creds and --role-arn flags in the hcp command line interface. None 1.1.4.3. Removals A removed item is typically function that was deprecated in releases and is no longer available in the product. You must use alternatives for the removed function. Consider the alternative actions in the Recommended action and details that are provided in the following table: Product or category Affected item Version Recommended action More details and links Cluster lifecycle Create cluster on Red Hat Virtualization 2.6 None None Cluster lifecycle Klusterlet Operator Lifecycle Manager Operator 2.6 None None 1.2. About cluster lifecycle with multicluster engine operator The multicluster engine for Kubernetes operator is the cluster lifecycle operator that provides cluster management capabilities for Red Hat OpenShift Container Platform and Red Hat Advanced Cluster Management hub clusters. If you installed Red Hat Advanced Cluster Management, you do not need to install multicluster engine operator, as it is automatically installed. See the multicluster engine operator Support matrix to learn about hub cluster and managed cluster requirements and support. for support information, as well as the following documentation: Console overview multicluster engine for Kubernetes operator Role-based access control Network configuration To continue, see the remaining cluster lifecyle documentation at Cluster lifecycle with multicluster engine operator overview . 1.2.1. Console overview OpenShift Container Platform console plug-ins are available with the OpenShift Container Platform web console and can be integrated. To use this feature, the console plug-ins must remain enabled. The multicluster engine operator displays certain console features from Infrastructure and Credentials navigation items. If you install Red Hat Advanced Cluster Management, you see more console capability. Note: With the plug-ins enabled, you can access Red Hat Advanced Cluster Management within the OpenShift Container Platform console from the cluster switcher by selecting All Clusters from the drop-down menu. To disable the plug-in, be sure you are in the Administrator perspective in the OpenShift Container Platform console. Find Administration in the navigation and click Cluster Settings , then click Configuration tab. From the list of Configuration resources , click the Console resource with the operator.openshift.io API group, which contains cluster-wide configuration for the web console. Click on the Console plug-ins tab. The mce plug-in is listed. Note: If Red Hat Advanced Cluster Management is installed, it is also listed as acm . Modify plug-in status from the table. In a few moments, you are prompted to refresh the console. 1.2.2. multicluster engine operator role-based access control RBAC is validated at the console level and at the API level. Actions in the console can be enabled or disabled based on user access role permissions. View the following sections for more information on RBAC for specific lifecycles in the product: Overview of roles Cluster lifecycle RBAC Cluster pools RBAC Console and API RBAC table for cluster lifecycle Credentials role-based access control 1.2.2.1. Overview of roles Some product resources are cluster-wide and some are namespace-scoped. You must apply cluster role bindings and namespace role bindings to your users for consistent access controls. View the table list of the following role definitions that are supported: 1.2.2.1.1. Table of role definition Role Definition cluster-admin This is an OpenShift Container Platform default role. A user with cluster binding to the cluster-admin role is an OpenShift Container Platform super user, who has all access. open-cluster-management:cluster-manager-admin A user with cluster binding to the open-cluster-management:cluster-manager-admin role is a super user, who has all access. This role allows the user to create a ManagedCluster resource. open-cluster-management:admin:<managed_cluster_name> A user with cluster binding to the open-cluster-management:admin:<managed_cluster_name> role has administrator access to the ManagedCluster resource named, <managed_cluster_name> . When a user has a managed cluster, this role is automatically created. open-cluster-management:view:<managed_cluster_name> A user with cluster binding to the open-cluster-management:view:<managed_cluster_name> role has view access to the ManagedCluster resource named, <managed_cluster_name> . open-cluster-management:managedclusterset:admin:<managed_clusterset_name> A user with cluster binding to the open-cluster-management:managedclusterset:admin:<managed_clusterset_name> role has administrator access to ManagedCluster resource named <managed_clusterset_name> . The user also has administrator access to managedcluster.cluster.open-cluster-management.io , clusterclaim.hive.openshift.io , clusterdeployment.hive.openshift.io , and clusterpool.hive.openshift.io resources, which has the managed cluster set labels: cluster.open-cluster-management.io and clusterset=<managed_clusterset_name> . A role binding is automatically generated when you are using a cluster set. See Creating a ManagedClusterSet to learn how to manage the resource. open-cluster-management:managedclusterset:view:<managed_clusterset_name> A user with cluster binding to the open-cluster-management:managedclusterset:view:<managed_clusterset_name> role has view access to the ManagedCluster resource named, <managed_clusterset_name>`. The user also has view access to managedcluster.cluster.open-cluster-management.io , clusterclaim.hive.openshift.io , clusterdeployment.hive.openshift.io , and clusterpool.hive.openshift.io resources, which has the managed cluster set labels: cluster.open-cluster-management.io , clusterset=<managed_clusterset_name> . For more details on how to manage managed cluster set resources, see Creating a ManagedClusterSet . admin, edit, view Admin, edit, and view are OpenShift Container Platform default roles. A user with a namespace-scoped binding to these roles has access to open-cluster-management resources in a specific namespace, while cluster-wide binding to the same roles gives access to all of the open-cluster-management resources cluster-wide. Important : Any user can create projects from OpenShift Container Platform, which gives administrator role permissions for the namespace. If a user does not have role access to a cluster, the cluster name is not visible. The cluster name is displayed with the following symbol: - . RBAC is validated at the console level and at the API level. Actions in the console can be enabled or disabled based on user access role permissions. View the following sections for more information on RBAC for specific lifecycles in the product. 1.2.2.2. Cluster lifecycle RBAC View the following cluster lifecycle RBAC operations: Create and administer cluster role bindings for all managed clusters. For example, create a cluster role binding to the cluster role open-cluster-management:cluster-manager-admin by entering the following command: This role is a super user, which has access to all resources and actions. You can create cluster-scoped managedcluster resources, the namespace for the resources that manage the managed cluster, and the resources in the namespace with this role. You might need to add the username of the ID that requires the role association to avoid permission errors. Run the following command to administer a cluster role binding for a managed cluster named cluster-name : This role has read and write access to the cluster-scoped managedcluster resource. This is needed because the managedcluster is a cluster-scoped resource and not a namespace-scoped resource. Create a namespace role binding to the cluster role admin by entering the following command: This role has read and write access to the resources in the namespace of the managed cluster. Create a cluster role binding for the open-cluster-management:view:<cluster-name> cluster role to view a managed cluster named cluster-name Enter the following command: This role has read access to the cluster-scoped managedcluster resource. This is needed because the managedcluster is a cluster-scoped resource. Create a namespace role binding to the cluster role view by entering the following command: This role has read-only access to the resources in the namespace of the managed cluster. View a list of the managed clusters that you can access by entering the following command: This command is used by administrators and users without cluster administrator privileges. View a list of the managed cluster sets that you can access by entering the following command: This command is used by administrators and users without cluster administrator privileges. 1.2.2.2.1. Cluster pools RBAC View the following cluster pool RBAC operations: As a cluster administrator, use cluster pool provision clusters by creating a managed cluster set and grant administrator permission to roles by adding the role to the group. View the following examples: Grant admin permission to the server-foundation-clusterset managed cluster set with the following command: Grant view permission to the server-foundation-clusterset managed cluster set with the following command: Create a namespace for the cluster pool, server-foundation-clusterpool . View the following examples to grant role permissions: Grant admin permission to server-foundation-clusterpool for the server-foundation-team-admin by running the following commands: As a team administrator, create a cluster pool named ocp46-aws-clusterpool with a cluster set label, cluster.open-cluster-management.io/clusterset=server-foundation-clusterset in the cluster pool namespace: The server-foundation-webhook checks if the cluster pool has the cluster set label, and if the user has permission to create cluster pools in the cluster set. The server-foundation-controller grants view permission to the server-foundation-clusterpool namespace for server-foundation-team-user . When a cluster pool is created, the cluster pool creates a clusterdeployment . Continue reading for more details: The server-foundation-controller grants admin permission to the clusterdeployment namespace for server-foundation-team-admin . The server-foundation-controller grants view permission clusterdeployment namespace for server-foundation-team-user . Note: As a team-admin and team-user , you have admin permission to the clusterpool , clusterdeployment , and clusterclaim . 1.2.2.2.2. Console and API RBAC table for cluster lifecycle View the following console and API RBAC tables for cluster lifecycle: Table 1.1. Console RBAC table for cluster lifecycle Resource Admin Edit View Clusters read, update, delete - read Cluster sets get, update, bind, join edit role not mentioned get Managed clusters read, update, delete no edit role mentioned get Provider connections create, read, update, and delete - read Table 1.2. API RBAC table for cluster lifecycle API Admin Edit View managedclusters.cluster.open-cluster-management.io You can use mcl (singular) or mcls (plural) in commands for this API. create, read, update, delete read, update read managedclusters.view.open-cluster-management.io You can use mcv (singular) or mcvs (plural) in commands for this API. read read read managedclusters.register.open-cluster-management.io/accept update update managedclusterset.cluster.open-cluster-management.io You can use mclset (singular) or mclsets (plural) in commands for this API. create, read, update, delete read, update read managedclustersets.view.open-cluster-management.io read read read managedclustersetbinding.cluster.open-cluster-management.io You can use mclsetbinding (singular) or mclsetbindings (plural) in commands for this API. create, read, update, delete read, update read klusterletaddonconfigs.agent.open-cluster-management.io create, read, update, delete read, update read managedclusteractions.action.open-cluster-management.io create, read, update, delete read, update read managedclusterviews.view.open-cluster-management.io create, read, update, delete read, update read managedclusterinfos.internal.open-cluster-management.io create, read, update, delete read, update read manifestworks.work.open-cluster-management.io create, read, update, delete read, update read submarinerconfigs.submarineraddon.open-cluster-management.io create, read, update, delete read, update read placements.cluster.open-cluster-management.io create, read, update, delete read, update read 1.2.2.2.3. Credentials role-based access control The access to credentials is controlled by Kubernetes. Credentials are stored and secured as Kubernetes secrets. The following permissions apply to accessing secrets in Red Hat Advanced Cluster Management for Kubernetes: Users with access to create secrets in a namespace can create credentials. Users with access to read secrets in a namespace can also view credentials. Users with the Kubernetes cluster roles of admin and edit can create and edit secrets. Users with the Kubernetes cluster role of view cannot view secrets because reading the contents of secrets enables access to service account credentials. 1.2.3. Network configuration Configure your network settings to allow the connections. Important: The trusted CA bundle is available in the multicluster engine operator namespace, but that enhancement requires changes to your network. The trusted CA bundle ConfigMap uses the default name of trusted-ca-bundle . You can change this name by providing it to the operator in an environment variable named TRUSTED_CA_BUNDLE . See Configuring the cluster-wide proxy in the Networking section of Red Hat OpenShift Container Platform for more information. Note: Registration Agent and Work Agent on the managed cluster do not support proxy settings because they communicate with apiserver on the hub cluster by establishing an mTLS connection, which cannot pass through the proxy. For the multicluster engine operator cluster networking requirements, see the following table: Direction Protocol Connection Port (if specified) Outbound Kubernetes API server of the provisioned managed cluster 6443 Outbound from the OpenShift Container Platform managed cluster to the hub cluster TCP Communication between the Ironic Python Agent and the bare metal operator on the hub cluster 6180, 6183, 6385, and 5050 Outbound from the hub cluster to the Ironic Python Agent on the managed cluster TCP Communication between the bare metal node where the Ironic Python Agent is running and the Ironic conductor service 9999 Outbound and inbound The WorkManager service route on the managed cluster 443 Inbound The Kubernetes API server of the multicluster engine for Kubernetes operator cluster from the managed cluster 6443 Note: The managed cluster must be able to reach the hub cluster control plane node IP addresses. 1.3. Installing and upgrading multicluster engine operator The multicluster engine operator is a software operator that enhances cluster fleet management. The multicluster engine operator supportsRed Hat OpenShift Container Platform and Kubernetes cluster lifecycle management across clouds and data centers. Deprecated: multicluster engine operator 2.3 and earlier versions are no longer supported. The documentation might remain available, but without any Errata or other updates. Best practice: Upgrade to the most recent version. The documentation references the earliest supported OpenShift Container Platform version, unless a specific component or function is introduced and tested only on a more recent version of OpenShift Container Platform. For full support information, see the multicluster engine operator Support matrix . For life cycle information, see Red Hat OpenShift Container Platform Life Cycle policy . Important: If you are using Red Hat Advanced Cluster Management version 2.5 or later, then multicluster engine for Kubernetes operator is already installed on the cluster. See the following documentation: Installing while connected online Configuring infrastructure nodes for multicluster engine operator Installing on disconnected networks Uninstalling Network configuration Upgrading disconnected clusters using policies MultiClusterEngine advanced configuration Red Hat Advanced Cluster Management integration 1.3.1. Installing while connected online The multicluster engine operator is installed with Operator Lifecycle Manager, which manages the installation, upgrade, and removal of the components that encompass the multicluster engine operator. Required access: Cluster administrator Important: You cannot install multicluster engine operator on a cluster that has a ManagedCluster resource configured in an external cluster. You must remove the ManagedCluster resource from the external cluster before you can install multicluster engine operator. For OpenShift Container Platform Dedicated environment, you must have cluster-admin permissions. By default dedicated-admin role does not have the required permissions to create namespaces in the OpenShift Container Platform Dedicated environment. By default, the multicluster engine operator components are installed on worker nodes of your OpenShift Container Platform cluster without any additional configuration. You can install multicluster engine operator onto worker nodes by using the OpenShift Container Platform OperatorHub web console interface, or by using the OpenShift Container Platform CLI. If you have configured your OpenShift Container Platform cluster with infrastructure nodes, you can install multicluster engine operator onto those infrastructure nodes by using the OpenShift Container Platform CLI with additional resource parameters. See the Installing multicluster engine on infrastructure nodes section for those details. If you plan to import Kubernetes clusters that were not created by OpenShift Container Platform or multicluster engine for Kubernetes operator, you will need to configure an image pull secret. For information on how to configure an image pull secret and other advanced configurations, see options in the Advanced configuration section of this documentation. Prerequisites Confirm your OpenShift Container Platform installation Installing from the OperatorHub web console interface Installing from the OpenShift Container Platform CLI 1.3.1.1. Prerequisites Before you install multicluster engine for Kubernetes operator, see the following requirements: Your Red Hat OpenShift Container Platform cluster must have access to the multicluster engine operator in the OperatorHub catalog from the OpenShift Container Platform console. You need access to the catalog.redhat.com . Your cluster does not have a ManagedCluster resource configured in an external cluster. You need access to the catalog.redhat.com . A supported version of OpenShift Container Platform must be deployed in your environment, and you must be logged into with the OpenShift Container Platform CLI. See the following install documentation: OpenShift Container Platform Installing Your OpenShift Container Platform command line interface (CLI) must be configured to run oc commands. See Getting started with the CLI for information about installing and configuring the OpenShift Container Platform CLI. Your OpenShift Container Platform permissions must allow you to create a namespace. You must have an Internet connection to access the dependencies for the operator. To install in a OpenShift Container Platform Dedicated environment, see the following: You must have the OpenShift Container Platform Dedicated environment configured and running. You must have cluster-admin authority to the OpenShift Container Platform Dedicated environment where you are installing the engine. If you plan to create managed clusters by using the Assisted Installer that is provided with Red Hat OpenShift Container Platform, see Preparing to install with the Assisted Installer topic in the OpenShift Container Platform documentation for the requirements. 1.3.1.2. Confirm your OpenShift Container Platform installation You must have a supported OpenShift Container Platform version, including the registry and storage services, installed and working. For more information about installing OpenShift Container Platform, see the OpenShift Container Platform documentation. Verify that multicluster engine operator is not already installed on your OpenShift Container Platform cluster. The multicluster engine operator allows only one single installation on each OpenShift Container Platform cluster. Continue with the following steps if there is no installation. To ensure that the OpenShift Container Platform cluster is set up correctly, access the OpenShift Container Platform web console with the following command: See the following example output: Open the URL in your browser and check the result. If the console URL displays console-openshift-console.router.default.svc.cluster.local , set the value for openshift_master_default_subdomain when you install OpenShift Container Platform. See the following example of a URL: https://console-openshift-console.apps.new-coral.purple-chesterfield.com . You can proceed to install multicluster engine operator. 1.3.1.3. Installing from the OperatorHub web console interface Best practice: From the Administrator view in your OpenShift Container Platform navigation, install the OperatorHub web console interface that is provided with OpenShift Container Platform. Select Operators > OperatorHub to access the list of available operators, and select multicluster engine for Kubernetes operator. Click Install . On the Operator Installation page, select the options for your installation: Namespace: The multicluster engine operator engine must be installed in its own namespace, or project. By default, the OperatorHub console installation process creates a namespace titled multicluster-engine . Best practice: Continue to use the multicluster-engine namespace if it is available. If there is already a namespace named multicluster-engine , select a different namespace. Channel: The channel that you select corresponds to the release that you are installing. When you select the channel, it installs the identified release, and establishes that the future errata updates within that release are obtained. Approval strategy: The approval strategy identifies the human interaction that is required for applying updates to the channel or release to which you subscribed. Select Automatic , which is selected by default, to ensure any updates within that release are automatically applied. Select Manual to receive a notification when an update is available. If you have concerns about when the updates are applied, this might be best practice for you. Note: To upgrade to the minor release, you must return to the OperatorHub page and select a new channel for the more current release. Select Install to apply your changes and create the operator. See the following process to create the MultiClusterEngine custom resource. In the OpenShift Container Platform console navigation, select Installed Operators > multicluster engine for Kubernetes . Select the MultiCluster Engine tab. Select Create MultiClusterEngine . Update the default values in the YAML file. See options in the MultiClusterEngine advanced configuration section of the documentation. The following example shows the default template that you can copy into the editor: apiVersion: multicluster.openshift.io/v1 kind: MultiClusterEngine metadata: name: multiclusterengine spec: {} Select Create to initialize the custom resource. It can take up to 10 minutes for the multicluster engine operator engine to build and start. After the MultiClusterEngine resource is created, the status for the resource is Available on the MultiCluster Engine tab. 1.3.1.4. Installing from the OpenShift Container Platform CLI Create a multicluster engine operator engine namespace where the operator requirements are contained. Run the following command, where namespace is the name for your multicluster engine for Kubernetes operator namespace. The value for namespace might be referred to as Project in the OpenShift Container Platform environment: Switch your project namespace to the one that you created. Replace namespace with the name of the multicluster engine for Kubernetes operator namespace that you created in step 1. Create a YAML file to configure an OperatorGroup resource. Each namespace can have only one operator group. Replace default with the name of your operator group. Replace namespace with the name of your project namespace. See the following example: apiVersion: operators.coreos.com/v1 kind: OperatorGroup metadata: name: <default> namespace: <namespace> spec: targetNamespaces: - <namespace> Run the following command to create the OperatorGroup resource. Replace operator-group with the name of the operator group YAML file that you created: Create a YAML file to configure an OpenShift Container Platform Subscription. Your file should look similar to the following example: apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: name: multicluster-engine spec: sourceNamespace: openshift-marketplace source: redhat-operators channel: stable-2.6 installPlanApproval: Automatic name: multicluster-engine Note: To configure infrastructure nodes, see Configuring infrastructure nodes for multicluster engine operator . Run the following command to create the OpenShift Container Platform Subscription. Replace subscription with the name of the subscription file that you created: Create a YAML file to configure the MultiClusterEngine custom resource. Your default template should look similar to the following example: apiVersion: multicluster.openshift.io/v1 kind: MultiClusterEngine metadata: name: multiclusterengine spec: {} Note: For installing the multicluster engine operator on infrastructure nodes, see the MultiClusterEngine custom resource additional configuration section: Run the following command to create the MultiClusterEngine custom resource. Replace custom-resource with the name of your custom resource file: If this step fails with the following error, the resources are still being created and applied. Run the command again in a few minutes when the resources are created: Run the following command to get the custom resource. It can take up to 10 minutes for the MultiClusterEngine custom resource status to display as Available in the status.phase field after you run the following command: If you are reinstalling the multicluster engine operator and the pods do not start, see Troubleshooting reinstallation failure for steps to work around this problem. Notes: A ServiceAccount with a ClusterRoleBinding automatically gives cluster administrator privileges to multicluster engine operator and to any user credentials with access to the namespace where you install multicluster engine operator. 1.3.2. Configuring infrastructure nodes for multicluster engine operator Configure your OpenShift Container Platform cluster to contain infrastructure nodes to run approved multicluster engine operator management components. Running components on infrastructure nodes avoids allocating OpenShift Container Platform subscription quota for the nodes that are running multicluster engine operator management components. After adding infrastructure nodes to your OpenShift Container Platform cluster, follow the Installing from the OpenShift Container Platform CLI instructions and add the following configurations to the Operator Lifecycle Manager Subscription and MultiClusterEngine custom resource. 1.3.2.1. Configuring infrastructure nodes to the OpenShift Container Platform cluster Follow the procedures that are described in Creating infrastructure machine sets in the OpenShift Container Platform documentation. Infrastructure nodes are configured with a Kubernetes taints and labels to keep non-management workloads from running on them. To be compatible with the infrastructure node enablement provided by multicluster engine operator, ensure your infrastructure nodes have the following taints and labels applied: metadata: labels: node-role.kubernetes.io/infra: "" spec: taints: - effect: NoSchedule key: node-role.kubernetes.io/infra 1.3.2.2. Operator Lifecycle Manager subscription configuration Add the following additional configuration before applying the Operator Lifecycle Manager Subscription: spec: config: nodeSelector: node-role.kubernetes.io/infra: "" tolerations: - key: node-role.kubernetes.io/infra effect: NoSchedule operator: Exists 1.3.2.3. MultiClusterEngine custom resource additional configuration Add the following additional configuration before applying the MultiClusterEngine custom resource: spec: nodeSelector: node-role.kubernetes.io/infra: "" 1.3.3. Install on disconnected networks You might need to install the multicluster engine operator on Red Hat OpenShift Container Platform clusters that are not connected to the Internet. The procedure to install on a disconnected engine requires some of the same steps as the connected installation. Important: You must install multicluster engine operator on a cluster that does not have Red Hat Advanced Cluster Management for Kubernetes earlier than 2.5 installed. The multicluster engine operator cannot co-exist with Red Hat Advanced Cluster Management for Kubernetes on versions earlier than 2.5 because they provide some of the same management components. It is recommended that you install multicluster engine operator on a cluster that has never previously installed Red Hat Advanced Cluster Management. If you are using Red Hat Advanced Cluster Management for Kubernetes at version 2.5.0 or later then multicluster engine operator is already installed on the cluster with it. You must download copies of the packages to access them during the installation, rather than accessing them directly from the network during the installation. Prerequisites Confirm your OpenShift Container Platform installation Installing in a disconnected environment 1.3.3.1. Prerequisites You must meet the following requirements before you install The multicluster engine operator: A supported OpenShift Container Platform version must be deployed in your environment, and you must be logged in with the command line interface (CLI). You need access to catalog.redhat.com . Note: For managing bare metal clusters, you need a supported OpenShift Container Platform version. See the OpenShift Container Platform Installing . Your Red Hat OpenShift Container Platform permissions must allow you to create a namespace. You must have a workstation with Internet connection to download the dependencies for the operator. 1.3.3.2. Confirm your OpenShift Container Platform installation You must have a supported OpenShift Container Platform version, including the registry and storage services, installed and working in your cluster. For information about OpenShift Container Platform, see OpenShift Container Platform documentation . When and if you are connected, accessing the OpenShift Container Platform web console with the following command to verify: See the following example output: The console URL in this example is: https:// console-openshift-console.apps.new-coral.purple-chesterfield.com . Open the URL in your browser and check the result. If the console URL displays console-openshift-console.router.default.svc.cluster.local , set the value for openshift_master_default_subdomain when you install OpenShift Container Platform. 1.3.3.3. Installing in a disconnected environment Important: You need to download the required images to a mirroring registry to install the operators in a disconnected environment. Without the download, you might receive ImagePullBackOff errors during your deployment. Follow these steps to install the multicluster engine operator in a disconnected environment: Create a mirror registry. If you do not already have a mirror registry, create one by completing the procedure in the Disconnected installation mirroring topic of the Red Hat OpenShift Container Platform documentation. If you already have a mirror registry, you can configure and use your existing one. Note: For bare metal only, you need to provide the certificate information for the disconnected registry in your install-config.yaml file. To access the image in a protected disconnected registry, you must provide the certificate information so the multicluster engine operator can access the registry. Copy the certificate information from the registry. Open the install-config.yaml file in an editor. Find the entry for additionalTrustBundle: | . Add the certificate information after the additionalTrustBundle line. The resulting content should look similar to the following example: additionalTrustBundle: | -----BEGIN CERTIFICATE----- certificate_content -----END CERTIFICATE----- sshKey: >- Important: Additional mirrors for disconnected image registries are needed if the following Governance policies are required: Container Security Operator policy: Locate the images in the registry.redhat.io/quay source. Compliance Operator policy: Locate the images in the registry.redhat.io/compliance source. Gatekeeper Operator policy: Locate the images in the registry.redhat.io/gatekeeper source. See the following example of mirrors lists for all three operators: - mirrors: - <your_registry>/rhacm2 source: registry.redhat.io/rhacm2 - mirrors: - <your_registry>/quay source: registry.redhat.io/quay - mirrors: - <your_registry>/compliance source: registry.redhat.io/compliance Save the install-config.yaml file. Create a YAML file that contains the ImageContentSourcePolicy with the name mce-policy.yaml . Note: If you modify this on a running cluster, it causes a rolling restart of all nodes. apiVersion: operator.openshift.io/v1alpha1 kind: ImageContentSourcePolicy metadata: name: mce-repo spec: repositoryDigestMirrors: - mirrors: - mirror.registry.com:5000/multicluster-engine source: registry.redhat.io/multicluster-engine Apply the ImageContentSourcePolicy file by entering the following command: Enable the disconnected Operator Lifecycle Manager Red Hat Operators and Community Operators. the multicluster engine operator is included in the Operator Lifecycle Manager Red Hat Operator catalog. Configure the disconnected Operator Lifecycle Manager for the Red Hat Operator catalog. Follow the steps in the Using Operator Lifecycle Manager on restricted networks topic of theRed Hat OpenShift Container Platform documentation. Continue to install the multicluster engine operator for Kubernetes from the Operator Lifecycle Manager catalog. See Installing while connected online for the required steps. 1.3.4. Upgrading disconnected clusters by using policies If you have the Red Hat Advanced Cluster Management for Kubernetes hub cluster, which uses the MultiClusterHub operator to manage, upgrade, and install hub cluster components, you can use OpenShift Update Service with Red Hat Advanced Cluster Management policies to upgrade multiple clusters in a disconnected environment. OpenShift Update Service is a separate operator and operand that monitors the available versions of your managed clusters and makes them available for upgrading in a disconnected environment. OpenShift Update Service can perform the following actions: Monitor when upgrades are available for your disconnected clusters. Identify which updates are mirrored to your local site for upgrading by using the graph data file. Notify you that an upgrade is available for your cluster by using the console. Prerequisites Prepare your disconnected mirror registry Deploy the operator for OpenShift Update Service Build the graph data init container Configuring the certificate for the mirrored registry Deploy the OpenShift Update Service instance Optional: Deploying a policy to override the default registry Deploying a policy to deploy a disconnected catalog source Deploying a policy to change the managed cluster parameter Viewing available upgrades Selecting a channel Upgrading the cluster Additional resources See Configuring additional trust stores for image registry access in the OpenShift Container Platform documentation to learn more about the external registry CA certificate. 1.3.4.1. Prerequisites You must have the following prerequisites before you can use OpenShift Update Service to upgrade your disconnected clusters: You need to install Red Hat Advanced Cluster Management. See the Red Hat Advanced Cluster Management Installing and upgrading documentation. You need a hub cluster that is running on a supported Red Hat OpenShift Container Platform version with restricted OLM configured. See Using Operator Lifecycle Manager on restricted networks for details about how to configure restricted OLM. Take note of the catalog source image when you configure restricted OLM. You need an OpenShift Container Platform cluster that the hub cluster manages. You need access credentials to a local repository where you can mirror the cluster images. See Disconnected installation mirroring for more information. Note: The image for the current version of the cluster that you upgrade must remain available as one of the mirrored images. If an upgrade fails, the cluster reverts back to the version of the cluster when you tried to upgrade. 1.3.4.2. Preparing your disconnected mirror registry You must mirror both the image that you want to upgrade to and the current image that you are upgrading from to your local mirror registry. Complete the following steps to mirror the images: Create a script file with content that resembles the following example. Replace <pull-secret> with the path to your OpenShift Container Platform pull secret: UPSTREAM_REGISTRY=quay.io PRODUCT_REPO=openshift-release-dev RELEASE_NAME=ocp-release OCP_RELEASE=4.15.2-x86_64 LOCAL_REGISTRY=USD(hostname):5000 LOCAL_SECRET_JSON=<pull-secret> oc adm -a USD{LOCAL_SECRET_JSON} release mirror \ --from=USD{UPSTREAM_REGISTRY}/USD{PRODUCT_REPO}/USD{RELEASE_NAME}:USD{OCP_RELEASE} \ --to=USD{LOCAL_REGISTRY}/ocp4 \ --to-release-image=USD{LOCAL_REGISTRY}/ocp4/release:USD{OCP_RELEASE} Run the script to mirror the images, configure settings, and separate the release images from the release content. 1.3.4.3. Deploying the operator for OpenShift Update Service To deploy the operator for OpenShift Update Service in your OpenShift Container Platform environment, complete the following steps: On your hub cluster, access the OpenShift Container Platform operator hub. Deploy the operator by selecting OpenShift Update Service Operator and update the default values if needed. The deployment of the operator creates a new project named openshift-update-service . Wait for the installation of the operator to finish. You can check the status of the installation by running the oc get pods command. Verify that the operator is in the running state. 1.3.4.4. Building the graph data init container OpenShift Update Service uses graph data information to find the available upgrades. In a connected environment, OpenShift Update Service pulls the graph data information for available upgrades directly from the update-service graph data GitHub repository . In a disconnected environment, you must make the graph data available in a local repository by using an init container . Complete the following steps to create a graph data init container : Clone the graph data Git repository by running the following command: git clone https://github.com/openshift/cincinnati-graph-data Create a file that has the information for your graph data init . You can find a sample Dockerfile in the cincinnati-operator GitHub repository. The FROM value is the external registry where OpenShift Update Service finds the images. The RUN commands create the directory and package the upgrade files. The CMD command copies the package file to the local repository and extracts the files for an upgrade. Run the following command to build the graph data init container : podman build -f <docker-path> -t <graph-path>:latest Replace <docker-path> with the path to the file that you created in the step. Replace <graph-path> with the path to your local graph data init container. Run the following command to push the graph data init container : podman push <graph-path>:latest --authfile=<pull-secret>.json Replace <graph-path> with the path to your local graph data init container. Replace <pull-secret> with the path to your pull secret file. Optional: If you do not have podman installed, replace podman with docker in step three and four. 1.3.4.5. Configuring the certificate for the mirrored registry If you are using a secure external container registry to store your mirrored OpenShift Container Platform release images, OpenShift Update Service requires access to this registry to build an upgrade graph. Complete the following steps to configure your CA certificate to work with the OpenShift Update Service pod: Find the OpenShift Container Platform external registry API, which is located in image.config.openshift.io . This is where the external registry CA certificate is stored. See Configuring additional trust stores for image registry access in the additional resources section to learn more. Create a ConfigMap in the openshift-config namespace and add your CA certificate in the updateservice-registry section. See the following example: apiVersion: v1 kind: ConfigMap metadata: name: trusted-ca data: updateservice-registry: | -----BEGIN CERTIFICATE----- ... -----END CERTIFICATE----- Edit the cluster resource in the image.config.openshift.io API to set the additionalTrustedCA field to the name of the ConfigMap that you created. Run the following command and replace <trusted_ca> with the path to your new ConfigMap: The OpenShift Update Service Operator watches the image.config.openshift.io API and the ConfigMap you created in the openshift-config namespace for changes, then restarts the deployment if the CA cert has changed. 1.3.4.6. Deploying the OpenShift Update Service instance When you finish deploying the OpenShift Update Service instance on your hub cluster, the instance is located where the images for the cluster upgrades are mirrored and made available to the disconnected managed cluster. Complete the following steps to deploy the instance: If you do not want to use the default namespace of the operator, navigate to Administration > Namespaces in the console to change it. In the Installed Operators section of the OpenShift Container Platform console, select OpenShift Update Service Operator . Select Create Instance in the menu. Paste the contents from your OpenShift Update Service instance. Your YAML instance might resemble the following manifest: apiVersion: update-service.openshift.io/v1beta2 kind: update-service metadata: name: openshift-cincinnati-instance namespace: openshift-update-service spec: registry: <registry-host-name>:<port> 1 replicas: 1 repository: USD{LOCAL_REGISTRY}/ocp4/release graphDataImage: '<host-name>:<port>/cincinnati-graph-data-container' 2 1 Replace with the path to your local disconnected registry for your images. 2 Replace with the path to your graph data init container. This is the same value that you used when you ran the podman push command to push your graph data init container. Select Create to create the instance. From the hub cluster CLI, enter the oc get pods command to view the status of the instance creation. It might take a few minutes. The process is complete when the result of the command shows that the instance and the operator are running. 1.3.4.7. Optional: Deploying a policy to override the default registry The following steps only apply if you have mirrored your releases into your mirrored registry. Deprecated: PlacementRule OpenShift Container Platform has a default image registry value that specifies where it finds the upgrade packages. In a disconnected environment, you can create a policy to replace that value with the path to your local image registry where you mirrored your release images. Complete the following steps to create the policy: Log in to the OpenShift Container Platform environment of your hub cluster. From the console, select Governance > Create policy . Set the YAML switch to On to view the YAML version of the policy. Delete all of the content in the YAML code. Paste the following YAML content into the window to create a custom policy: apiVersion: policy.open-cluster-management.io/v1 kind: Policy metadata: name: policy-mirror namespace: default spec: disabled: false remediationAction: enforce policy-templates: - objectDefinition: apiVersion: policy.open-cluster-management.io/v1 kind: ConfigurationPolicy metadata: name: policy-image-content-source-policy spec: object-templates: - complianceType: musthave objectDefinition: apiVersion: operator.openshift.io/v1alpha1 kind: ImageContentSourcePolicy metadata: name: <your-local-mirror-name> 1 spec: repositoryDigestMirrors: - mirrors: - <your-registry> 2 source: registry.redhat.io --- apiVersion: policy.open-cluster-management.io/v1 kind: PlacementBinding metadata: name: binding-policy-mirror namespace: default placementRef: name: placement-policy-mirror kind: PlacementRule apiGroup: apps.open-cluster-management.io subjects: - name: policy-mirror kind: Policy apiGroup: policy.open-cluster-management.io --- apiVersion: apps.open-cluster-management.io/v1 kind: PlacementRule metadata: name: placement-policy-mirror namespace: default spec: clusterConditions: - status: "True" type: ManagedClusterConditionAvailable clusterSelector: matchExpressions: [] 3 1 Replace with your local mirror name. 2 Replace with the path to your local mirror repository. You can find the path to your local mirror by running the oc adm release mirror command. 3 Selects all clusters if not specified. Select Enforce if supported . Select Create to create the policy. 1.3.4.8. Deploying a policy to deploy a disconnected catalog source You can push the Catalogsource policy to the managed cluster to change the default location from a connected location to your disconnected local registry. Complete the following steps to change the default location: In the console menu, select Governance > Create policy . Set the YAML switch to On to view the YAML version of the policy. Delete all of the content in the YAML code. Paste the following YAML content into the window to create a custom policy: apiVersion: policy.open-cluster-management.io/v1 kind: Policy metadata: name: policy-catalog namespace: default spec: disabled: false remediationAction: enforce policy-templates: - objectDefinition: apiVersion: policy.open-cluster-management.io/v1 kind: ConfigurationPolicy metadata: name: policy-catalog spec: object-templates: - complianceType: musthave objectDefinition: apiVersion: config.openshift.io/v1 kind: OperatorHub metadata: name: cluster spec: disableAllDefaultSources: true - complianceType: musthave objectDefinition: apiVersion: operators.coreos.com/v1alpha1 kind: CatalogSource metadata: name: my-operator-catalog namespace: openshift-marketplace spec: sourceType: grpc image: '<registry_host_name>:<port>/olm/redhat-operators:v1' 1 displayName: My Operator Catalog publisher: grpc --- apiVersion: policy.open-cluster-management.io/v1 kind: PlacementBinding metadata: name: binding-policy-catalog namespace: default placementRef: name: placement-policy-catalog kind: PlacementRule apiGroup: apps.open-cluster-management.io subjects: - name: policy-catalog kind: Policy apiGroup: policy.open-cluster-management.io --- apiVersion: apps.open-cluster-management.io/v1 kind: PlacementRule metadata: name: placement-policy-catalog namespace: default spec: clusterConditions: - status: "True" type: ManagedClusterConditionAvailable clusterSelector: matchExpressions: [] 2 1 Replace with the path to your local restricted catalog source image. 2 Selects all clusters if not specified. Select Enforce if supported . Select Create to create the policy. 1.3.4.9. Deploying a policy to change the managed cluster parameter You can push the ClusterVersion policy to the managed cluster to change the default location where it retrieves its upgrades. Complete the following steps: From the managed cluster, confirm that the ClusterVersion upstream parameter is currently the default public OpenShift Update Service operand by running the following command: oc get clusterversion -o yaml From the hub cluster, identify the route URL to the OpenShift Update Service operand by running the following command: oc get routes Remember the result for later. In the hub cluster console menu, select Governance > Create a policy . Set the YAML switch to On to view the YAML version of the policy. Delete all of the content in the YAML code. Paste the following YAML content into the window to create a custom policy: apiVersion: policy.open-cluster-management.io/v1 kind: Policy metadata: name: policy-cluster-version namespace: default annotations: policy.open-cluster-management.io/standards: null policy.open-cluster-management.io/categories: null policy.open-cluster-management.io/controls: null spec: disabled: false remediationAction: enforce policy-templates: - objectDefinition: apiVersion: policy.open-cluster-management.io/v1 kind: ConfigurationPolicy metadata: name: policy-cluster-version spec: object-templates: - complianceType: musthave objectDefinition: apiVersion: config.openshift.io/v1 kind: ClusterVersion metadata: name: version spec: channel: stable-4.4 upstream: >- https://example-cincinnati-policy-engine-uri/api/upgrades_info/v1/graph 1 --- apiVersion: policy.open-cluster-management.io/v1 kind: PlacementBinding metadata: name: binding-policy-cluster-version namespace: default placementRef: name: placement-policy-cluster-version kind: PlacementRule apiGroup: apps.open-cluster-management.io subjects: - name: policy-cluster-version kind: Policy apiGroup: policy.open-cluster-management.io --- apiVersion: apps.open-cluster-management.io/v1 kind: PlacementRule metadata: name: placement-policy-cluster-version namespace: default spec: clusterConditions: - status: "True" type: ManagedClusterConditionAvailable clusterSelector: matchExpressions: [] 2 1 Replace with the path to your hub cluster OpenShift Update Service operand. Selects all clusters if not specified. You can complete the following steps to determine the path to the operand: Run the oc get get routes -A command on the hub cluster. Find the route to update-service . The path to the operand is the value in the HOST/PORT field. Select Enforce if supported . Select Create to create the policy. In the managed cluster CLI, confirm that the upstream parameter in the ClusterVersion is updated with the local hub cluster OpenShift Update Service URL by running the following command: oc get clusterversion -o yaml Verify that the results resemble the following content: apiVersion: v1 items: - apiVersion: config.openshift.io/v1 kind: ClusterVersion [..] spec: channel: stable-4.4 upstream: https://<hub-cincinnati-uri>/api/upgrades_info/v1/graph 1.3.4.10. Viewing available upgrades You can view a list of available upgrades for your managed cluster by completing the following steps: From the console, select Infrastructure > Clusters . Select a cluster that is in the Ready state. From the Actions menu, select Upgrade cluster . Verify that the optional upgrade paths are available. Note: No available upgrade versions are shown if the current version is not mirrored into the local image repository. 1.3.4.11. Selecting a channel You can use the Red Hat Advanced Cluster Management console to select a channel for your cluster upgrades on OpenShift Container Platform. Those versions must be available on the mirror registry. Complete the steps in Selecting a channel to specify a channel for your upgrades. 1.3.4.12. Upgrading the cluster After configuring the disconnected registry, Red Hat Advanced Cluster Management and OpenShift Update Service use the disconnected registry to find if upgrades are available. If no available upgrades are displayed, make sure that you have the release image of the current level of the cluster and at least one later level mirrored in the local repository. If the release image for the current version of the cluster is not available, no upgrades are available. Complete the following steps to upgrade: In the console, select Infrastructure > Clusters . Find the cluster that you want to choose if there is an available upgrade. If there is an upgrade available, the Distribution version column for the cluster shows an upgrade available. Select the Options menu for the cluster, and select Upgrade cluster . Select the target version for the upgrade, and select Upgrade . If your cluster upgrade fails, the Operator generally retries the upgrade a few times, stops, and reports the status of the failing component. In some cases, the upgrade process continues to cycle through attempts to complete the process. Rolling your cluster back to a version after a failed upgrade is not supported. Contact Red Hat support for assistance if your cluster upgrade fails. 1.3.4.12.1. Additional resources See Configuring additional trust stores for image registry access in the OpenShift Container Platform documentation to learn more about the external registry CA certificate. 1.3.5. Advanced configuration The multicluster engine operator is installed using an operator that deploys all of the required components. The multicluster engine operator can be further configured during or after installation. Learn more about the advanced configuration options. 1.3.5.1. Deployed components Add one or more of the following attributes to the MultiClusterEngine custom resource: Table 1.3. Table list of the deployed components Name Description Enabled assisted-service Installs OpenShift Container Platform with minimal infrastructure prerequisites and comprehensive pre-flight validations True cluster-lifecycle Provides cluster management capabilities for OpenShift Container Platform and Kubernetes hub clusters True cluster-manager Manages various cluster-related operations within the cluster environment True cluster-proxy-addon Automates the installation of apiserver-network-proxy on both hub and managed clusters using a reverse proxy server True console-mce Enables the multicluster engine operator console plug-in True discovery Discovers and identifies new clusters within the OpenShift Cluster Manager True hive Provisions and performs initial configuration of OpenShift Container Platform clusters True hypershift Hosts OpenShift Container Platform control planes at scale with cost and time efficiency, and cross-cloud portability True hypershift-local-hosting Enables local hosting capabilities for within the local cluster environment True local-cluster Enables the import and self-management of the local hub cluster where the multicluster engine operator is deployed True managedserviceacccount Synchronizes service accounts to managed clusters, and collects tokens as secret resources to give back to the hub cluster True server-foundation Provides foundational services for server-side operations within the multicluster environment True When you install multicluster engine operator on to the cluster, not all of the listed components are enabled by default. You can further configure multicluster engine operator during or after installation by adding one or more attributes to the MultiClusterEngine custom resource. Continue reading for information about the attributes that you can add. 1.3.5.2. Console and component configuration The following example displays the spec.overrides default template that you can use to enable or disable the component: apiVersion: operator.open-cluster-management.io/v1 kind: MultiClusterEngine metadata: name: multiclusterengine spec: overrides: components: - name: <name> 1 enabled: true Replace name with the name of the component. Alternatively, you can run the following command. Replace namespace with the name of your project and name with the name of the component: 1.3.5.3. Local-cluster enablement By default, the cluster that is running multicluster engine operator manages itself. To install multicluster engine operator without the cluster managing itself, specify the following values in the spec.overrides.components settings in the MultiClusterEngine section: apiVersion: multicluster.openshift.io/v1 kind: MultiClusterEngine metadata: name: multiclusterengine spec: overrides: components: - name: local-cluster enabled: false The name value identifies the hub cluster as a local-cluster . The enabled setting specifies whether the feature is enabled or disabled. When the value is true , the hub cluster manages itself. When the value is false , the hub cluster does not manage itself. A hub cluster that is managed by itself is designated as the local-cluster in the list of clusters. 1.3.5.4. Custom image pull secret If you plan to import Kubernetes clusters that were not created by OpenShift Container Platform or the multicluster engine operator, generate a secret that contains your OpenShift Container Platform pull secret information to access the entitled content from the distribution registry. The secret requirements for OpenShift Container Platform clusters are automatically resolved by OpenShift Container Platform and multicluster engine for Kubernetes operator, so you do not have to create the secret if you are not importing other types of Kubernetes clusters to be managed. Important: These secrets are namespace-specific, so make sure that you are in the namespace that you use for your engine. Download your OpenShift Container Platform pull secret file from cloud.redhat.com/openshift/install/pull-secret by selecting Download pull secret . Your OpenShift Container Platform pull secret is associated with your Red Hat Customer Portal ID, and is the same across all Kubernetes providers. Run the following command to create your secret: Replace secret with the name of the secret that you want to create. Replace namespace with your project namespace, as the secrets are namespace-specific. Replace path-to-pull-secret with the path to your OpenShift Container Platform pull secret that you downloaded. The following example displays the spec.imagePullSecret template to use if you want to use a custom pull secret. Replace secret with the name of your pull secret: apiVersion: multicluster.openshift.io/v1 kind: MultiClusterEngine metadata: name: multiclusterengine spec: imagePullSecret: <secret> 1.3.5.5. Target namespace The operands can be installed in a designated namespace by specifying a location in the MultiClusterEngine custom resource. This namespace is created upon application of the MultiClusterEngine custom resource. Important: If no target namespace is specified, the operator will install to the multicluster-engine namespace and will set it in the MultiClusterEngine custom resource specification. The following example displays the spec.targetNamespace template that you can use to specify a target namespace. Replace target with the name of your destination namespace. Note: The target namespace cannot be the default namespace: apiVersion: multicluster.openshift.io/v1 kind: MultiClusterEngine metadata: name: multiclusterengine spec: targetNamespace: <target> 1.3.5.6. availabilityConfig The hub cluster has two availabilities: High and Basic . By default, the hub cluster has an availability of High , which gives hub cluster components a replicaCount of 2 . This provides better support in cases of failover but consumes more resources than the Basic availability, which gives components a replicaCount of 1 . Important: Set spec.availabilityConfig to Basic if you are using multicluster engine operator on a single-node OpenShift cluster. The following examples shows the spec.availabilityConfig template with Basic availability: apiVersion: multicluster.openshift.io/v1 kind: MultiClusterEngine metadata: name: multiclusterengine spec: availabilityConfig: "Basic" 1.3.5.7. nodeSelector You can define a set of node selectors in the MultiClusterEngine to install to specific nodes on your cluster. The following example shows spec.nodeSelector to assign pods to nodes with the label node-role.kubernetes.io/infra : spec: nodeSelector: node-role.kubernetes.io/infra: "" To define a set of node selectors for the Red Hat Advanced Cluster Management hub cluster, see nodeSelector in the product documentation. 1.3.5.8. tolerations You can define a list of tolerations to allow the MultiClusterEngine to tolerate specific taints defined on the cluster. The following example shows a spec.tolerations that matches a node-role.kubernetes.io/infra taint: spec: tolerations: - key: node-role.kubernetes.io/infra effect: NoSchedule operator: Exists The infra-node toleration is set on pods by default without specifying any tolerations in the configuration. Customizing tolerations in the configuration will replace this default behavior. To define a list of tolerations for the Red Hat Advanced Cluster Management hub cluster, see tolerations in the product documentation. 1.3.5.9. ManagedServiceAccount add-on The ManagedServiceAccount add-on allows you to create or delete a service account on a managed cluster. To install with this add-on enabled, include the following in the MultiClusterEngine specification in spec.overrides : apiVersion: multicluster.openshift.io/v1 kind: MultiClusterEngine metadata: name: multiclusterengine spec: overrides: components: - name: managedserviceaccount enabled: true The ManagedServiceAccount add-on can be enabled after creating MultiClusterEngine by editing the resource on the command line and setting the managedserviceaccount component to enabled: true . Alternatively, you can run the following command and replace <multiclusterengine-name> with the name of your MultiClusterEngine resource. 1.3.6. Uninstalling When you uninstall multicluster engine for Kubernetes operator, you see two different levels of the process: A custom resource removal and a complete operator uninstall . It might take up to five minutes to complete the uninstall process. The custom resource removal is the most basic type of uninstall that removes the custom resource of the MultiClusterEngine instance but leaves other required operator resources. This level of uninstall is helpful if you plan to reinstall using the same settings and components. The second level is a more complete uninstall that removes most operator components, excluding components such as custom resource definitions. When you continue with this step, it removes all of the components and subscriptions that were not removed with the custom resource removal. After this uninstall, you must reinstall the operator before reinstalling the custom resource. 1.3.6.1. Prerequisite: Detach enabled services Before you uninstall the multicluster engine for Kubernetes operator, you must detach all of the clusters that are managed by that engine. To avoid errors, detach all clusters that are still managed by the engine, then try to uninstall again. If you have managed clusters attached, you might see the following message. For more information about detaching clusters, see the Removing a cluster from management section by selecting the information for your provider in Cluster creation introduction . 1.3.6.2. Removing resources by using commands If you have not already. ensure that your OpenShift Container Platform CLI is configured to run oc commands. See Getting started with the OpenShift CLI in the OpenShift Container Platform documentation for more information about how to configure the oc commands. Change to your project namespace by entering the following command. Replace namespace with the name of your project namespace: Enter the following command to remove the MultiClusterEngine custom resource: You can view the progress by entering the following command: Enter the following commands to delete the multicluster-engine ClusterServiceVersion in the namespace it is installed in: The CSV version shown here may be different. 1.3.6.3. Deleting the components by using the console When you use the RedHat OpenShift Container Platform console to uninstall, you remove the operator. Complete the following steps to uninstall by using the console: In the OpenShift Container Platform console navigation, select Operators > Installed Operators > multicluster engine for Kubernetes . Remove the MultiClusterEngine custom resource. Select the tab for Multiclusterengine . Select the Options menu for the MultiClusterEngine custom resource. Select Delete MultiClusterEngine . Run the clean-up script according to the procedure in the following section. Tip: If you plan to reinstall the same multicluster engine for Kubernetes operator version, you can skip the rest of the steps in this procedure and reinstall the custom resource. Navigate to Installed Operators . Remove the _ multicluster engine for Kubernetes_ operator by selecting the Options menu and selecting Uninstall operator . 1.3.6.4. Troubleshooting Uninstall If the multicluster engine custom resource is not being removed, remove any potential remaining artifacts by running the clean-up script. Copy the following script into a file: See Disconnected installation mirroring for more information. 1.4. Red Hat Advanced Cluster Management integration If you are using multicluster engine operator with Red Hat Advanced Cluster Management installed, you can access more multicluster management features, such as Observability and Policy . For integrated capability, see the following requirements: Install Red Hat Advanced Cluster Management. See the Red Hat Advanced Cluster Management Installing and upgrading documentation. See MultiClusterHub advanced configuration for details about Red Hat Advanced Cluster Management after you install. See the following procedures for multicluster engine operator and Red Hat Advanced Cluster Management multicluster management: Discovering multicluster engine operator hosted clusters in Red Hat Advanced Cluster Management Automating import for discovered hosted clusters Automating import for discovered OpenShift Service on AWS clusters Observability integration 1.4.1. Discovering multicluster engine operator hosted clusters in Red Hat Advanced Cluster Management If you have multicluster engine operator clusters that are hosting multiple hosted clusters , you can bring those hosted clusters to a Red Hat Advanced Cluster Management hub cluster to manage with Red Hat Advanced Cluster Management management components, such as Application lifecycle and Governance . You can have those hosted clusters automatically discovered and imported as managed clusters. Note: Since the hosted control planes run on the managed multicluster engine operator cluster nodes, the number of hosted control planes that the cluster can host is determined by the resource availability of managed multicluster engine operator cluster nodes, as well as the number of managed multicluster engine operator clusters. You can add more nodes or managed clusters to host more hosted control planes. Required access: Cluster administrator Prerequisites Configuring Red Hat Advanced Cluster Management to import multicluster engine operator clusters Importing multicluster engine operator manually Discovering hosted clusters from multicluster engine operator 1.4.1.1. Prerequisites You need one or more multicluster engine operator clusters. You need a Red Hat Advanced Cluster Management cluster set as your hub cluster. Install the clusteradm CLI by running the following command: curl -L https://raw.githubusercontent.com/open-cluster-management-io/clusteradm/main/install.sh | bash 1.4.1.2. Configuring Red Hat Advanced Cluster Management to import multicluster engine operator clusters multicluster engine operator has a local-cluster , which is a hub cluster that is managed. The following default addons are enabled for this local-cluster in the open-cluster-management-agent-addon namespace: cluster-proxy managed-serviceaccount work-manager 1.4.1.2.1. Configuring add-ons When your multicluster engine operator is imported into Red Hat Advanced Cluster Management, Red Hat Advanced Cluster Management enables the same set of add-ons to manage the multicluster engine operator. Install those add-ons in a different multicluster engine operator namespace so that the multicluster engine operator can self-manage with the local-cluster add-ons while Red Hat Advanced Cluster Management manages multicluster engine operator at the same time. Complete the following procedure: Log in to your Red Hat Advanced Cluster Management with the CLI. Create the addonDeploymentConfig resource to specify a different add-on installation namespace. See the following example where agentInstallNamespace points to open-cluster-management-agent-addon-discovery : apiVersion: addon.open-cluster-management.io/v1alpha1 kind: addonDeploymentConfig metadata: name: addon-ns-config namespace: multicluster-engine spec: agentInstallNamespace: open-cluster-management-agent-addon-discovery Run oc apply -f <filename>.yaml to apply the file. Update the existing ClusterManagementAddOn resources for the add-ons so that the add-ons are installed in the open-cluster-management-agent-addon-discovery namespace that is specified in the addonDeploymentConfig resource that you created. See the following example with open-cluster-management-global-set as the namespace: apiVersion: addon.open-cluster-management.io/v1alpha1 kind: ClusterManagementAddOn metadata: name: work-manager spec: addonMeta: displayName: work-manager installStrategy: placements: - name: global namespace: open-cluster-management-global-set rolloutStrategy: type: All type: Placements Add the addonDeploymentConfigs to the ClusterManagementAddOn . See the following example: apiVersion: addon.open-cluster-management.io/v1alpha1 kind: ClusterManagementAddOn metadata: name: work-manager spec: addonMeta: displayName: work-manager installStrategy: placements: - name: global namespace: open-cluster-management-global-set rolloutStrategy: type: All configs: - group: addon.open-cluster-management.io name: addon-ns-config namespace: multicluster-engine resource: addondeploymentconfigs type: Placements Add the addonDeploymentConfig to the managed-serviceaccount . See the following example: apiVersion: addon.open-cluster-management.io/v1alpha1 kind: ClusterManagementAddOn metadata: name: managed-serviceaccount spec: addonMeta: displayName: managed-serviceaccount installStrategy: placements: - name: global namespace: open-cluster-management-global-set rolloutStrategy: type: All configs: - group: addon.open-cluster-management.io name: addon-ns-config namespace: multicluster-engine resource: addondeploymentconfigs type: Placements Add the addondeploymentconfigs value to the ClusterManagementAddOn resource named, cluster-proxy . See the following example: apiVersion: addon.open-cluster-management.io/v1alpha1 kind: ClusterManagementAddOn metadata: name: cluster-proxy spec: addonMeta: displayName: cluster-proxy installStrategy: placements: - name: global namespace: open-cluster-management-global-set rolloutStrategy: type: All configs: - group: addon.open-cluster-management.io name: addon-ns-config namespace: multicluster-engine resource: addondeploymentconfigs type: Placements Run the following command to verify that the add-ons for the Red Hat Advanced Cluster Management local-cluster are re-installed into the namespace that you specified: oc get deployment -n open-cluster-management-agent-addon-discovery See the following output example: NAME READY UP-TO-DATE AVAILABLE AGE cluster-proxy-proxy-agent 1/1 1 1 24h klusterlet-addon-workmgr 1/1 1 1 24h managed-serviceaccount-addon-agent 1/1 1 1 24h 1.4.1.2.2. Creating a KlusterletConfig resource multicluster engine operator has a local-cluster, which is a hub cluster that is managed. A resource named klusterlet is created for this local-cluster. When your multicluster engine operator is imported into Red Hat Advanced Cluster Management, Red Hat Advanced Cluster Management installs the klusterlet with the same name, klusterlet , to manage the multicluster engine operator. This conflicts with the multicluster engine operator local-cluster klusterlet. You need to create a KlusterletConfig resource that is used by ManagedCluster resources to import multicluster engine operator clusters so that the klusterlet is installed with a different name to avoid the conflict. Complete the following procedure: Create a KlusterletConfig resource using the following example. When this KlusterletConfig resource is referenced in a managed cluster, the value in the spec.installMode.noOperator.postfix field is used as a suffix to the klusterlet name, such as klusterlet-mce-import : kind: KlusterletConfig apiVersion: config.open-cluster-management.io/v1alpha1 metadata: name: mce-import-klusterlet-config spec: installMode: type: noOperator noOperator: postfix: mce-import Run oc apply -f <filename>.yaml to apply the file. 1.4.1.2.3. Configure for backup and restore Since you installed Red Hat Advanced Cluster Management, you can also use the Backup and restore feature. If the hub cluster is restored in a disaster recovery scenario, the imported multicluster engine operator clusters and hosted clusters are imported to the newer Red Hat Advanced Cluster Management hub cluster. In this scenario, you need to restore the configurations as part of Red Hat Advanced Cluster Management hub cluster restore. Add the backup=true label to enable backup. See the following steps for each add-on: For your addon-ns-config , run the following command: oc label addondeploymentconfig addon-ns-config -n multicluster-engine cluster.open-cluster-management.io/backup=true For your hypershift-addon-deploy-config , run the following command: oc label addondeploymentconfig hypershift-addon-deploy-config -n multicluster-engine cluster.open-cluster-management.io/backup=true For your work-manager , run the following command: oc label clustermanagementaddon work-manager cluster.open-cluster-management.io/backup=true For your `cluster-proxy `, run the following command: oc label clustermanagementaddon cluster-proxy cluster.open-cluster-management.io/backup=true For your managed-serviceaccount , run the following command: oc label clustermanagementaddon managed-serviceaccount cluster.open-cluster-management.io/backup=true For your mce-import-klusterlet-config , run the following command: oc label KlusterletConfig mce-import-klusterlet-config cluster.open-cluster-management.io/backup=true 1.4.1.3. Importing multicluster engine operator manually To manually import an multicluster engine operator cluster from your Red Hat Advanced Cluster Management cluster, complete the following procedure: From your Red Hat Advanced Cluster Management cluster, create a ManagedCluster resource manually to import an multicluster engine operator cluster. See the following file example: apiVersion: cluster.open-cluster-management.io/v1 kind: ManagedCluster metadata: annotations: agent.open-cluster-management.io/klusterlet-config: mce-import-klusterlet-config 1 labels: cloud: auto-detect vendor: auto-detect name: mce-a 2 spec: hubAcceptsClient: true leaseDurationSeconds: 60 1 1 The mce-import-klusterlet-config annotation references the KlusterletConfig resource that you created in the step to install the Red Hat Advanced Cluster Management klusterlet with a different name in multicluster engine operator. 2 The example imports an multicluster engine operator managed cluster named mce-a . Run oc apply -f <filename>.yaml to apply the file. Create the auto-import-secret secret that references the kubeconfig of the multicluster engine operator cluster. Go to Importing a cluster by using the auto import secret to add the auto import secret to complete the multicluster engine operator auto-import process. After you create the auto import secret in the multicluster engine operator managed cluster namespace in the Red Hat Advanced Cluster Management cluster, the managed cluster is registered. Run the following command to get the status: oc get managedcluster See following example output with the status and example URLs of managed clusters: NAME HUB ACCEPTED MANAGED CLUSTER URLS JOINED AVAILABLE AGE local-cluster true https://<api.acm-hub.com:port> True True 44h mce-a true https://<api.mce-a.com:port> True True 27s Important: Do not enable any other Red Hat Advanced Cluster Management add-ons for the imported multicluster engine operator. 1.4.1.4. Discovering hosted clusters After all your multicluster engine operator clusters are imported into Red Hat Advanced Cluster Management, you need to enable the hypershift-addon for those managed multicluster engine operator clusters to discover the hosted clusters. Default add-ons are installed into a different namespace in the procedures. Similarly, you install the hypershift-addon into a different namespace in multicluster engine operator so that the add-ons agent for multicluster engine operator local-cluster and the agent for Red Hat Advanced Cluster Management can work in multicluster engine operator. Important: For all the following commands, replace <managed-cluster-names> with comma-separated managed cluster names for multicluster engine operator. Run the following command to set the agentInstallNamespace namespace of the add-on to open-cluster-management-agent-addon-discovery : oc patch addondeploymentconfig hypershift-addon-deploy-config -n multicluster-engine --type=merge -p '{"spec":{"agentInstallNamespace":"open-cluster-management-agent-addon-discovery"}}' Run the following command to disable metrics and to disable the HyperShift operator management: oc patch addondeploymentconfig hypershift-addon-deploy-config -n multicluster-engine --type=merge -p '{"spec":{"customizedVariables":[{"name":"disableMetrics","value": "true"},{"name":"disableHOManagement","value": "true"}]}}' Run the following command to enable the hypershift-addon for multicluster engine operator: clusteradm addon enable --names hypershift-addon --clusters <managed-cluster-names> You can get the multicluster engine operator managed cluster names by running the following command in Red Hat Advanced Cluster Management. oc get managedcluster Log into multicluster engine operator clusters and verify that the hypershift-addon is installed in the namespace that you specified. Run the following command: oc get deployment -n open-cluster-management-agent-addon-discovery See the following example output that lists the add-ons: NAME READY UP-TO-DATE AVAILABLE AGE cluster-proxy-proxy-agent 1/1 1 1 24h klusterlet-addon-workmgr 1/1 1 1 24h hypershift-addon-agent 1/1 1 1 24h managed-serviceaccount-addon-agent 1/1 1 1 24h Red Hat Advanced Cluster Management deploys the hypershift-addon , which is the discovery agent that discovers hosted clusters from multicluster engine operator. The agent creates the corresponding DiscoveredCluster custom resource in the multicluster engine operator managed cluster namespace in the Red Hat Advanced Cluster Management hub cluster when the hosted cluster kube-apiserver becomes available. You can view your discovered clusters in the console. Log into hub cluster console and navigate to All Clusters > Infrastructure > Clusters . Find the Discovered clusters tab to view all discovered hosted clusters from multicluster engine operator with type MultiClusterEngineHCP . , visit Automating import for discovered hosted clusters to learn how to automatically import clusters. 1.4.2. Automating import for discovered hosted clusters Automate the import of hosted clusters by using the DiscoveredCluster resource for faster cluster management, without manually importing individual clusters. When you automatically import a discovered hosted cluster into Red Hat Advanced Cluster Management, all Red Hat Advanced Cluster Management add-ons are enabled so that you can start managing the hosted clusters with the available management tools. The hosted cluster is also auto-imported into multicluster engine operator. Through the multicluster engine operator console, you can manage the hosted cluster lifecycle. However, you cannot manage the hosted cluster lifecycle from the Red Hat Advanced Cluster Management console. Required access: Cluster administrator Prerequisites Configure settings for automatic import Creating the placement definition Binding the import policy to a placement definition 1.4.2.1. Prerequisites You need Red Hat Advanced Cluster Management installed. See the Red Hat Advanced Cluster Management Installing and upgrading documentation. You need to learn about Policies . See the introduction to Governance in the Red Hat Advanced Cluster Management documentation. 1.4.2.2. Configuring settings for automatic import Discovered hosted clusters from managed multicluster engine operator clusters are represented in DiscoveredCluster custom resources, which are located in the managed multicluster engine operator cluster namespace in Red Hat Advanced Cluster Management. See the following DiscoveredCluster resource and namespace example: apiVersion: discovery.open-cluster-management.io/v1 kind: DiscoveredCluster metadata: creationTimestamp: "2024-05-30T23:05:39Z" generation: 1 labels: hypershift.open-cluster-management.io/hc-name: hosted-cluster-1 hypershift.open-cluster-management.io/hc-namespace: clusters name: hosted-cluster-1 namespace: mce-1 resourceVersion: "1740725" uid: b4c36dca-a0c4-49f9-9673-f561e601d837 spec: apiUrl: https://a43e6fe6dcef244f8b72c30426fb6ae3-ea3fec7b113c88da.elb.us-west-1.amazonaws.com:6443 cloudProvider: aws creationTimestamp: "2024-05-30T23:02:45Z" credential: {} displayName: mce-1-hosted-cluster-1 importAsManagedCluster: false isManagedCluster: false name: hosted-cluster-1 openshiftVersion: 0.0.0 status: Active type: MultiClusterEngineHCP These discovered hosted clusters are not automatically imported into Red Hat Advanced Cluster Management until the spec.importAsManagedCluster field is set to true . Learn how to use a Red Hat Advanced Cluster Management policy to automatically set this field to true for all type.MultiClusterEngineHCP within DiscoveredCluster resources so that discovered hosted clusters are immediately automatically imported into Red Hat Advanced Cluster Management. Configure your Policy to import all your discovered hosted clusters automatically. Log in to your hub cluster from the CLI to complete the following procedure: Create a YAML file for your DiscoveredCluster custom resource and edit the configuration that is referenced in the following example: apiVersion: policy.open-cluster-management.io/v1 kind: Policy metadata: name: policy-mce-hcp-autoimport namespace: open-cluster-management-global-set annotations: policy.open-cluster-management.io/standards: NIST SP 800-53 policy.open-cluster-management.io/categories: CM Configuration Management policy.open-cluster-management.io/controls: CM-2 Baseline Configuration policy.open-cluster-management.io/description: Discovered clusters that are of type MultiClusterEngineHCP can be automatically imported into ACM as managed clusters. This policy configure those discovered clusters so they are automatically imported. Fine tuning MultiClusterEngineHCP clusters to be automatically imported can be done by configure filters at the configMap or add annotation to the discoverd cluster. spec: disabled: false policy-templates: - objectDefinition: apiVersion: policy.open-cluster-management.io/v1 kind: ConfigurationPolicy metadata: name: mce-hcp-autoimport-config spec: object-templates: - complianceType: musthave objectDefinition: apiVersion: v1 kind: ConfigMap metadata: name: discovery-config namespace: open-cluster-management-global-set data: rosa-filter: "" remediationAction: enforce 1 severity: low - objectDefinition: apiVersion: policy.open-cluster-management.io/v1 kind: ConfigurationPolicy metadata: name: policy-mce-hcp-autoimport spec: remediationAction: enforce severity: low object-templates-raw: | {{- /* find the MultiClusterEngineHCP DiscoveredClusters */ -}} {{- range USDdc := (lookup "discovery.open-cluster-management.io/v1" "DiscoveredCluster" "" "").items }} {{- /* Check for the flag that indicates the import should be skipped */ -}} {{- USDskip := "false" -}} {{- range USDkey, USDvalue := USDdc.metadata.annotations }} {{- if and (eq USDkey "discovery.open-cluster-management.io/previously-auto-imported") (eq USDvalue "true") }} {{- USDskip = "true" }} {{- end }} {{- end }} {{- /* if the type is MultiClusterEngineHCP and the status is Active */ -}} {{- if and (eq USDdc.spec.status "Active") (contains (fromConfigMap "open-cluster-management-global-set" "discovery-config" "mce-hcp-filter") USDdc.spec.displayName) (eq USDdc.spec.type "MultiClusterEngineHCP") (eq USDskip "false") }} - complianceType: musthave objectDefinition: apiVersion: discovery.open-cluster-management.io/v1 kind: DiscoveredCluster metadata: name: {{ USDdc.metadata.name }} namespace: {{ USDdc.metadata.namespace }} spec: importAsManagedCluster: true 2 {{- end }} {{- end }} 1 To enable automatic import, change the spec.remediationAction to enforce . 2 To enable automatic import, change spec.importAsManagedCluster to true . Run oc apply -f <filename>.yaml -n <namespace> to apply the file. 1.4.2.3. Creating the placement definition You need to create a placement definition that specifies the managed cluster for the policy deployment. Complete the following procedure: Create the Placement definition that selects only the local-cluster , which is a hub cluster that is managed. Use the following YAML sample: apiVersion: cluster.open-cluster-management.io/v1beta1 kind: Placement metadata: name: policy-mce-hcp-autoimport-placement namespace: open-cluster-management-global-set spec: tolerations: - key: cluster.open-cluster-management.io/unreachable operator: Exists - key: cluster.open-cluster-management.io/unavailable operator: Exists clusterSets: - global predicates: - requiredClusterSelector: labelSelector: matchExpressions: - key: local-cluster operator: In values: - "true" Run oc apply -f placement.yaml -n <namespace> , where namespace matches the namespace that you used for the policy that you previously created. 1.4.2.4. Binding the import policy to a placement definition After you create the policy and the placement, you need to connect the two resources. Complete the following steps: Connect the resources by using a PlacementBinding resource. See the following example where placementRef points to the Placement that you created, and subjects points to the Policy that you created: apiVersion: policy.open-cluster-management.io/v1 kind: PlacementBinding metadata: name: policy-mce-hcp-autoimport-placement-binding namespace: open-cluster-management-global-set placementRef: name: policy-mce-hcp-autoimport-placement apiGroup: cluster.open-cluster-management.io kind: Placement subjects: - name: policy-mce-hcp-autoimport apiGroup: policy.open-cluster-management.io kind: Policy To verify, run the following command: oc get policy policy-mce-hcp-autoimport -n <namespace> Important: You can detach a hosted cluster from Red Hat Advanced Cluster Management by using the Detach option in the Red Hat Advanced Cluster Management console, or by removing the corresponding ManagedCluster custom resource from the command line. For best results, detach the managed hosted cluster before destroying the hosted cluster. When a discovered cluster is detached, the following annotation is added to the DiscoveredCluster resource to prevent the policy to import the discovered cluster again. annotations: discovery.open-cluster-management.io/previously-auto-imported: "true" If you want the detached discovered cluster to be reimported, remove this annotation. 1.4.3. Automating import for discovered OpenShift Service on AWS clusters Automate the import of OpenShift Service on AWS clusters by using Red Hat Advanced Cluster Management policy enforcement for faster cluster management, without manually importing individual clusters. Required access: Cluster administrator Prerequisites Creating the automatic import policy Creating the placement definition Binding the import policy to a placement definition 1.4.3.1. Prerequisites You need Red Hat Advanced Cluster Management installed. See the Red Hat Advanced Cluster Management Installing and upgrading documentation. You need to learn about Policies . See the introduction to Governance in the Red Hat Advanced Cluster Management documentation. 1.4.3.2. Creating the automatic import policy The following policy and procedure is an example of how to import all your discovered OpenShift Service on AWS clusters automatically. Log in to your hub cluster from the CLI to complete the following procedure: Create a YAML file with the following example and apply the changes that are referenced: apiVersion: policy.open-cluster-management.io/v1 kind: Policy metadata: name: policy-rosa-autoimport annotations: policy.open-cluster-management.io/standards: NIST SP 800-53 policy.open-cluster-management.io/categories: CM Configuration Management policy.open-cluster-management.io/controls: CM-2 Baseline Configuration policy.open-cluster-management.io/description: OpenShift Service on AWS discovered clusters can be automatically imported into Red Hat Advanced Cluster Management as managed clusters with this policy. You can select and configure those managed clusters so you can import. Configure filters or add an annotation if you do not want all of your OpenShift Service on AWS clusters to be automatically imported. spec: remediationAction: inform 1 disabled: false policy-templates: - objectDefinition: apiVersion: policy.open-cluster-management.io/v1 kind: ConfigurationPolicy metadata: name: rosa-autoimport-config spec: object-templates: - complianceType: musthave objectDefinition: apiVersion: v1 kind: ConfigMap metadata: name: discovery-config namespace: open-cluster-management-global-set data: rosa-filter: "" 2 remediationAction: enforce severity: low - objectDefinition: apiVersion: policy.open-cluster-management.io/v1 kind: ConfigurationPolicy metadata: name: policy-rosa-autoimport spec: remediationAction: enforce severity: low object-templates-raw: | {{- /* find the ROSA DiscoveredClusters */ -}} {{- range USDdc := (lookup "discovery.open-cluster-management.io/v1" "DiscoveredCluster" "" "").items }} {{- /* Check for the flag that indicates the import should be skipped */ -}} {{- USDskip := "false" -}} {{- range USDkey, USDvalue := USDdc.metadata.annotations }} {{- if and (eq USDkey "discovery.open-cluster-management.io/previously-auto-imported") (eq USDvalue "true") }} {{- USDskip = "true" }} {{- end }} {{- end }} {{- /* if the type is ROSA and the status is Active */ -}} {{- if and (eq USDdc.spec.status "Active") (contains (fromConfigMap "open-cluster-management-global-set" "discovery-config" "rosa-filter") USDdc.spec.displayName) (eq USDdc.spec.type "ROSA") (eq USDskip "false") }} - complianceType: musthave objectDefinition: apiVersion: discovery.open-cluster-management.io/v1 kind: DiscoveredCluster metadata: name: {{ USDdc.metadata.name }} namespace: {{ USDdc.metadata.namespace }} spec: importAsManagedCluster: true {{- end }} {{- end }} - objectDefinition: apiVersion: policy.open-cluster-management.io/v1 kind: ConfigurationPolicy metadata: name: policy-rosa-managedcluster-status spec: remediationAction: enforce severity: low object-templates-raw: | {{- /* Use the same DiscoveredCluster list to check ManagedCluster status */ -}} {{- range USDdc := (lookup "discovery.open-cluster-management.io/v1" "DiscoveredCluster" "" "").items }} {{- /* Check for the flag that indicates the import should be skipped */ -}} {{- USDskip := "false" -}} {{- range USDkey, USDvalue := USDdc.metadata.annotations }} {{- if and (eq USDkey "discovery.open-cluster-management.io/previously-auto-imported") (eq USDvalue "true") }} {{- USDskip = "true" }} {{- end }} {{- end }} {{- /* if the type is ROSA and the status is Active */ -}} {{- if and (eq USDdc.spec.status "Active") (contains (fromConfigMap "open-cluster-management-global-set" "discovery-config" "rosa-filter") USDdc.spec.displayName) (eq USDdc.spec.type "ROSA") (eq USDskip "false") }} - complianceType: musthave objectDefinition: apiVersion: cluster.open-cluster-management.io/v1 kind: ManagedCluster metadata: name: {{ USDdc.spec.displayName }} namespace: {{ USDdc.spec.displayName }} status: conditions: - type: ManagedClusterConditionAvailable status: "True" {{- end }} {{- end }} 1 To enable automatic import, change the spec.remediationAction to enforce . 2 Optional: Specify a value here to select a subset of the matching OpenShift Service on AWS clusters, which are based on discovered cluster names. The rosa-filter has no value by default, so the filter does not restrict cluster names without a subset value. Run oc apply -f <filename>.yaml -n <namespace> to apply the file. 1.4.3.3. Creating the placement definition You need to create a placement definition that specifies the managed cluster for the policy deployment. Create the placement definition that selects only the local-cluster , which is a hub cluster that is managed. Use the following YAML sample: apiVersion: cluster.open-cluster-management.io/v1beta1 kind: Placement metadata: name: placement-openshift-plus-hub spec: predicates: - requiredClusterSelector: labelSelector: matchExpressions: - key: name operator: In values: - local-cluster Run oc apply -f placement.yaml -n <namespace> , where namespace matches the namespace that you used for the policy that you previously created. 1.4.3.4. Binding the import policy to a placement definition After you create the policy and the placement, you need to connect the two resources. Connect the resources by using a PlacementBinding . See the following example where placementRef points to the Placement that you created, and subjects points to the Policy that you created: apiVersion: policy.open-cluster-management.io/v1 kind: PlacementBinding metadata: name: binding-policy-rosa-autoimport placementRef: apiGroup: cluster.open-cluster-management.io kind: Placement name: placement-policy-rosa-autoimport subjects: - apiGroup: policy.open-cluster-management.io kind: Policy name: policy-rosa-autoimport To verify, run the following command: 1.4.4. Observability integration With the Red Hat Advanced Cluster Management Observability feature, you can view health and utilization of clusters across your fleet. You can install Red Hat Advanced Cluster Management and enable Observability. 1.4.4.1. Observing hosted control planes After you enable the multicluster-observability pod, you can use Red Hat Advanced Cluster Management Observability Grafana dashboards to view the following information about your hosted control planes: ACM > Hosted Control Planes Overview dashboard to see cluster capacity estimates for hosting hosted control planes, the related cluster resources, and the list and status of existing hosted control planes. ACM > Resources > Hosted Control Plane dashboard that you can access from the Overview page to see the resource utilizations of the selected hosted control plane. To enable, see Observability service . 1.5. Managing credentials A credential is required to create and manage a Red Hat OpenShift Container Platform cluster on a cloud service provider with multicluster engine operator. The credential stores the access information for a cloud provider. Each provider account requires its own credential, as does each domain on a single provider. You can create and manage your cluster credentials. Credentials are stored as Kubernetes secrets. Secrets are copied to the namespace of a managed cluster so that the controllers for the managed cluster can access the secrets. When a credential is updated, the copies of the secret are automatically updated in the managed cluster namespaces. Note: Changes to the pull secret, SSH keys, or base domain of the cloud provider credentials are not reflected for existing managed clusters, as they have already been provisioned using the original credentials. Required access: Edit Creating a credential for Amazon Web Services Creating a credential for Microsoft Azure Creating a credential for Google Cloud Platform Creating a credential for VMware vSphere Creating a credential for Red Hat OpenStack Platform Creating a credential for Red Hat OpenShift Cluster Manager Creating a credential for Ansible Automation Platform Creating a credential for an on-premises environment 1.5.1. Creating a credential for Amazon Web Services You need a credential to use multicluster engine operator console to deploy and manage an Red Hat OpenShift Container Platform cluster on Amazon Web Services (AWS). Required access: Edit Note: This procedure must be done before you can create a cluster with multicluster engine operator. 1.5.1.1. Prerequisites You must have the following prerequisites before creating a credential: A deployed multicluster engine operator hub cluster Internet access for your multicluster engine operator hub cluster so it can create the Kubernetes cluster on Amazon Web Services (AWS) AWS login credentials, which include access key ID and secret access key. See Understanding and getting your security credentials . Account permissions that allow installing clusters on AWS. See Configuring an AWS account for instructions on how to configure an AWS account. 1.5.1.2. Managing a credential by using the console To create a credential from the multicluster engine operator console, complete the steps in the console. Start at the navigation menu. Click Credentials to choose from existing credential options. Tip: Create a namespace specifically to host your credentials, both for convenience and added security. You can optionally add a Base DNS domain for your credential. If you add the base DNS domain to the credential, it is automatically populated in the correct field when you create a cluster with this credential. See the following steps: Add your AWS access key ID for your AWS account. See Log in to AWS to find your ID. Provide the contents for your new AWS Secret Access Key . If you want to enable a proxy, enter the proxy information: HTTP proxy URL: The URL that should be used as a proxy for HTTP traffic. HTTPS proxy URL: The secure proxy URL that should be used for HTTPS traffic. If no value is provided, the same value as the HTTP Proxy URL is used for both HTTP and HTTPS . No proxy domains: A comma-separated list of domains that should bypass the proxy. Begin a domain name with a period . to include all of the subdomains that are in that domain. Add an asterisk * to bypass the proxy for all destinations. Additional trust bundle: One or more additional CA certificates that are required for proxying HTTPS connections. Enter your Red Hat OpenShift pull secret. See Download your Red Hat OpenShift pull secret to download your pull secret. Add your SSH private key and SSH public key , which allows you to connect to the cluster. You can use an existing key pair, or create a new one with key generation program. You can create a cluster that uses this credential by completing the steps in Creating a cluster on Amazon Web Services or Creating a cluster on Amazon Web Services GovCloud . You can edit your credential in the console. If the cluster was created by using this provider connection, then the <cluster-name>-aws-creds> secret from <cluster-namespace> will get updated with the new credentials. Note: Updating credentials does not work for cluster pool claimed clusters. When you are no longer managing a cluster that is using a credential, delete the credential to protect the information in the credential. Select Actions to delete in bulk, or select the options menu beside the credential that you want to delete. 1.5.1.2.1. Creating an S3 secret To create an Amazon Simple Storage Service (S3) secret, complete the following task from the console: Click Add credential > AWS > S3 Bucket . If you click For Hosted Control Plane , the name and namespace are provided. Enter information for the following fields that are provided: bucket name : Add the name of the S3 bucket. aws_access_key_id : Add your AWS access key ID for your AWS account. Log in to AWS to find your ID. aws_secret_access_key : Provide the contents for your new AWS Secret Access Key. Region : Enter your AWS region. 1.5.1.3. Creating an opaque secret by using the API To create an opaque secret for Amazon Web Services by using the API, apply YAML content in the YAML preview window that is similar to the following example: kind: Secret metadata: name: <managed-cluster-name>-aws-creds namespace: <managed-cluster-namespace> type: Opaque data: aws_access_key_id: USD(echo -n "USD{AWS_KEY}" | base64 -w0) aws_secret_access_key: USD(echo -n "USD{AWS_SECRET}" | base64 -w0) Notes: Opaque secrets are not visible in the console. Opaque secrets are created in the managed cluster namespace you chose. Hive uses the opaque secret to provision the cluster. When provisioning the cluster by using the Red Hat Advanced Cluster Management console, the credentials you previoulsy created are copied to the managed cluster namespace as the opaque secret. Add labels to your credentials to view your secret in the console. For example, the following AWS S3 Bucket oc label secret is appended with type=awss3 and credentials --from-file=... . : 1.5.1.4. Additional resources See Understanding and getting your security credentials . See Configuring an AWS account . Log in to AWS . Download your Red Hat OpenShift pull secret . See Generating a key pair for cluster node SSH access for more information about how to generate a key. See Creating a cluster on Amazon Web Services . See Creating a cluster on Amazon Web Services GovCloud . Return to Creating a credential for Amazon Web Services . 1.5.2. Creating a credential for Microsoft Azure You need a credential to use multicluster engine operator console to create and manage a Red Hat OpenShift Container Platform cluster on Microsoft Azure or on Microsoft Azure Government. Required access: Edit Note: This procedure is a prerequisite for creating a cluster with multicluster engine operator. 1.5.2.1. Prerequisites You must have the following prerequisites before creating a credential: A deployed multicluster engine operator hub cluster. Internet access for your multicluster engine operator hub cluster so that it can create the Kubernetes cluster on Azure. Azure login credentials, which include your Base Domain Resource Group and Azure Service Principal JSON. See Microsoft Azure portal to get your login credentials. Account permissions that allow installing clusters on Azure. See How to configure Cloud Services and Configuring an Azure account for more information. 1.5.2.2. Managing a credential by using the console To create a credential from the multicluster engine operator console, complete the steps in the console. Start at the navigation menu. Click Credentials to choose from existing credential options. Tip: Create a namespace specifically to host your credentials, both for convenience and added security. Optional: Add a Base DNS domain for your credential. If you add the base DNS domain to the credential, it is automatically populated in the correct field when you create a cluster with this credential. Select whether the environment for your cluster is AzurePublicCloud or AzureUSGovernmentCloud . The settings are different for the Azure Government environment, so ensure that this is set correctly. Add your Base domain resource group name for your Azure account. This entry is the resource name that you created with your Azure account. You can find your Base Domain Resource Group Name by selecting Home > DNS Zones in the Azure interface. See Create an Azure service principal with the Azure CLI to find your base domain resource group name. Provide the contents for your Client ID . This value is generated as the appId property when you create a service principal with the following command: Replace service_principal with the name of your service principal. Add your Client Secret . This value is generated as the password property when you create a service principal with the following command: Replace service_principal with the name of your service principal. Add your Subscription ID . This value is the id property in the output of the following command: Add your Tenant ID . This value is the tenantId property in the output of the following command: If you want to enable a proxy, enter the proxy information: HTTP proxy URL: The URL that should be used as a proxy for HTTP traffic. HTTPS proxy URL: The secure proxy URL that should be used for HTTPS traffic. If no value is provided, the same value as the HTTP Proxy URL is used for both HTTP and HTTPS . No proxy domains: A comma-separated list of domains that should bypass the proxy. Begin a domain name with a period . to include all of the subdomains that are in that domain. Add an asterisk * to bypass the proxy for all destinations. Additional trust bundle: One or more additional CA certificates that are required for proxying HTTPS connections. Enter your Red Hat OpenShift pull secret . See Download your Red Hat OpenShift pull secret to download your pull secret. Add your SSH private key and SSH public key to use to connect to the cluster. You can use an existing key pair, or create a new pair using a key generation program. You can create a cluster that uses this credential by completing the steps in Creating a cluster on Microsoft Azure . You can edit your credential in the console. When you are no longer managing a cluster that is using a credential, delete the credential to protect the information in the credential. Select Actions to delete in bulk, or select the options menu beside the credential that you want to delete. 1.5.2.3. Creating an opaque secret by using the API To create an opaque secret for Microsoft Azure by using the API instead of the console, apply YAML content in the YAML preview window that is similar to the following example: kind: Secret metadata: name: <managed-cluster-name>-azure-creds namespace: <managed-cluster-namespace> type: Opaque data: baseDomainResourceGroupName: USD(echo -n "USD{azure_resource_group_name}" | base64 -w0) osServicePrincipal.json: USD(base64 -w0 "USD{AZURE_CRED_JSON}") Notes: Opaque secrets are not visible in the console. Opaque secrets are created in the managed cluster namespace you chose. Hive uses the opaque secret to provision the cluster. When provisioning the cluster by using the Red Hat Advanced Cluster Management console, the credentials you previoulsy created are copied to the managed cluster namespace as the opaque secret. 1.5.2.4. Additional resources See Microsoft Azure portal . See How to configure Cloud Services . See Configuring an Azure account . See Create an Azure service principal with the Azure CLI to find your base domain resource group name. Download your Red Hat OpenShift pull secret . See Generating a key pair for cluster node SSH access for more information about how to generate a key. See Creating a cluster on Microsoft Azure . Return to Creating a credential for Microsoft Azure . 1.5.3. Creating a credential for Google Cloud Platform You need a credential to use multicluster engine operator console to create and manage a Red Hat OpenShift Container Platform cluster on Google Cloud Platform (GCP). Required access: Edit Note: This procedure is a prerequisite for creating a cluster with multicluster engine operator. 1.5.3.1. Prerequisites You must have the following prerequisites before creating a credential: A deployed multicluster engine operator hub cluster Internet access for your multicluster engine operator hub cluster so it can create the Kubernetes cluster on GCP GCP login credentials, which include user Google Cloud Platform Project ID and Google Cloud Platform service account JSON key. See Creating and managing projects . Account permissions that allow installing clusters on GCP. See Configuring a GCP project for instructions on how to configure an account. 1.5.3.2. Managing a credential by using the console To create a credential from the multicluster engine operator console, complete the steps in the console. Start at the navigation menu. Click Credentials to choose from existing credential options. Tip: Create a namespace specifically to host your credentials, for both convenience and security. You can optionally add a Base DNS domain for your credential. If you add the base DNS domain to the credential, it is automatically populated in the correct field when you create a cluster with this credential. See the following steps: Add your Google Cloud Platform project ID for your GCP account. See Log in to GCP to retrieve your settings. Add your Google Cloud Platform service account JSON key . See the Create service accounts documentation to create your service account JSON key. Follow the steps for the GCP console. Provide the contents for your new Google Cloud Platform service account JSON key . If you want to enable a proxy, enter the proxy information: HTTP proxy URL: The URL that should be used as a proxy for HTTP traffic. HTTPS proxy URL: The secure proxy URL that should be used for HTTPS traffic. If no value is provided, the same value as the HTTP Proxy URL is used for both HTTP and HTTPS . No proxy domains: A comma-separated list of domains that should bypass the proxy. Begin a domain name with a period . to include all of the subdomains that are in that domain. Add and asterisk * to bypass the proxy for all destinations. Additional trust bundle: One or more additional CA certificates that are required for proxying HTTPS connections. Enter your Red Hat OpenShift pull secret. See Download your Red Hat OpenShift pull secret to download your pull secret. Add your SSH private key and SSH public key so you can access the cluster. You can use an existing key pair, or create a new pair using a key generation program. You can use this connection when you create a cluster by completing the steps in Creating a cluster on Google Cloud Platform . You can edit your credential in the console. When you are no longer managing a cluster that is using a credential, delete the credential to protect the information in the credential. Select Actions to delete in bulk, or select the options menu beside the credential that you want to delete. 1.5.3.3. Creating an opaque secret by using the API To create an opaque secret for Google Cloud Platform by using the API instead of the console, apply YAML content in the YAML preview window that is similar to the following example: kind: Secret metadata: name: <managed-cluster-name>-gcp-creds namespace: <managed-cluster-namespace> type: Opaque data: osServiceAccount.json: USD(base64 -w0 "USD{GCP_CRED_JSON}") Notes: Opaque secrets are not visible in the console. Opaque secrets are created in the managed cluster namespace you chose. Hive uses the opaque secret to provision the cluster. When provisioning the cluster by using the Red Hat Advanced Cluster Management console, the credentials you previoulsy created are copied to the managed cluster namespace as the opaque secret. 1.5.3.4. Additional resources See Creating and managing projects . See Configuring a GCP project . Log in to GCP . See the Create service accounts to create your service account JSON key. Download your Red Hat OpenShift pull secret . See Generating a key pair for cluster node SSH access for more information about how to generate a key. See Creating a cluster on Google Cloud Platform . Return to Creating a credential for Google Cloud Platform . 1.5.4. Creating a credential for VMware vSphere You need a credential to use multicluster engine operator console to deploy and manage a Red Hat OpenShift Container Platform cluster on VMware vSphere. Required access: Edit 1.5.4.1. Prerequisites You must have the following prerequisites before you create a credential: You must create a credential for VMware vSphere before you can create a cluster with multicluster engine operator. A deployed hub cluster on a supported OpenShift Container Platform version. Internet access for your hub cluster so it can create the Kubernetes cluster on VMware vSphere. VMware vSphere login credentials and vCenter requirements configured for OpenShift Container Platform when using installer-provisioned infrastructure. See Installing a cluster on vSphere with customizations . These credentials include the following information: vCenter account privileges. Cluster resources. DHCP available. ESXi hosts have time synchronized (for example, NTP). 1.5.4.2. Managing a credential by using the console To create a credential from the multicluster engine operator console, complete the steps in the console. Start at the navigation menu. Click Credentials to choose from existing credential options. Tip: Create a namespace specifically to host your credentials, both for convenience and added security. You can optionally add a Base DNS domain for your credential. If you add the base DNS domain to the credential, it is automatically populated in the correct field when you create a cluster with this credential. See the following steps: Add your VMware vCenter server fully-qualified host name or IP address . The value must be defined in the vCenter server root CA certificate. If possible, use the fully-qualified host name. Add your VMware vCenter username . Add your VMware vCenter password . Add your VMware vCenter root CA certificate . You can download your certificate in the download.zip package with the certificate from your VMware vCenter server at: https://<vCenter_address>/certs/download.zip . Replace vCenter_address with the address to your vCenter server. Unpackage the download.zip . Use the certificates from the certs/<platform> directory that have a .0 extension. Tip: You can use the ls certs/<platform> command to list all of the available certificates for your platform. Replace <platform> with the abbreviation for your platform: lin , mac , or win . For example: certs/lin/3a343545.0 Best practice: Link together multiple certificates with a .0 extension by running the cat certs/lin/*.0 > ca.crt command. Add your VMware vSphere cluster name . Add your VMware vSphere datacenter . Add your VMware vSphere default datastore . Add your VMware vSphere disk type . Add your VMware vSphere folder . Add your VMware vSphere resource pool . For disconnected installations only: Complete the fields in the Configuration for disconnected installation subsection with the required information: Cluster OS image : This value contains the URL to the image to use for Red Hat OpenShift Container Platform cluster machines. Image content source : This value contains the disconnected registry path. The path contains the hostname, port, and repository path to all of the installation images for disconnected installations. Example: repository.com:5000/openshift/ocp-release . The path creates an image content source policy mapping in the install-config.yaml to the Red Hat OpenShift Container Platform release images. As an example, repository.com:5000 produces this imageContentSource content: - mirrors: - registry.example.com:5000/ocp4 source: quay.io/openshift-release-dev/ocp-release-nightly - mirrors: - registry.example.com:5000/ocp4 source: quay.io/openshift-release-dev/ocp-release - mirrors: - registry.example.com:5000/ocp4 source: quay.io/openshift-release-dev/ocp-v4.0-art-dev Additional trust bundle : This value provides the contents of the certificate file that is required to access the mirror registry. Note: If you are deploying managed clusters from a hub that is in a disconnected environment, and want them to be automatically imported post install, add an Image Content Source Policy to the install-config.yaml file by using the YAML editor. A sample entry is shown in the following example: - mirrors: - registry.example.com:5000/rhacm2 source: registry.redhat.io/rhacm2 If you want to enable a proxy, enter the proxy information: HTTP proxy URL: The URL that should be used as a proxy for HTTP traffic. HTTPS proxy URL: The secure proxy URL that should be used for HTTPS traffic. If no value is provided, the same value as the HTTP Proxy URL is used for both HTTP and HTTPS . No proxy domains: A comma-separated list of domains that should bypass the proxy. Begin a domain name with a period . to include all of the subdomains that are in that domain. Add and asterisk * to bypass the proxy for all destinations. Additional trust bundle: One or more additional CA certificates that are required for proxying HTTPS connections. Enter your Red Hat OpenShift pull secret. See Download your Red Hat OpenShift pull secret to download your pull secret. Add your SSH private key and SSH public key , which allows you to connect to the cluster. You can use an existing key pair, or create a new one with key generation program. You can create a cluster that uses this credential by completing the steps in Creating a cluster on VMware vSphere . You can edit your credential in the console. When you are no longer managing a cluster that is using a credential, delete the credential to protect the information in the credential. Select Actions to delete in bulk, or select the options menu beside the credential that you want to delete. 1.5.4.3. Creating an opaque secret by using the API To create an opaque secret for VMware vSphere by using the API instead of the console, apply YAML content in the YAML preview window that is similar to the following example: kind: Secret metadata: name: <managed-cluster-name>-vsphere-creds namespace: <managed-cluster-namespace> type: Opaque data: username: USD(echo -n "USD{VMW_USERNAME}" | base64 -w0) password.json: USD(base64 -w0 "USD{VMW_PASSWORD}") Notes: Opaque secrets are not visible in the console. Opaque secrets are created in the managed cluster namespace you chose. Hive uses the opaque secret to provision the cluster. When provisioning the cluster by using the Red Hat Advanced Cluster Management console, the credentials you previoulsy created are copied to the managed cluster namespace as the opaque secret. 1.5.4.4. Additional resources See Installing a cluster on vSphere with customizations . Download your Red Hat OpenShift pull secret . See Generating a key pair for cluster node SSH access for more information. See Creating a cluster on VMware vSphere . Return to Creating a credential for VMware vSphere . 1.5.5. Creating a credential for Red Hat OpenStack You need a credential to use multicluster engine operator console to deploy and manage a supported Red Hat OpenShift Container Platform cluster on Red Hat OpenStack Platform. Notes: You must create a credential for Red Hat OpenStack Platform before you can create a cluster with multicluster engine operator. 1.5.5.1. Prerequisites You must have the following prerequisites before you create a credential: A deployed hub cluster on a supported OpenShift Container Platform version. Internet access for your hub cluster so it can create the Kubernetes cluster on Red Hat OpenStack Platform. Red Hat OpenStack Platform login credentials and Red Hat OpenStack Platform requirements configured for OpenShift Container Platform when using installer-provisioned infrastructure. See Installing a cluster on OpenStack with customizations . Download or create a clouds.yaml file for accessing the CloudStack API. Within the clouds.yaml file: Determine the cloud auth section name to use. Add a line for the password , immediately following the username line. 1.5.5.2. Managing a credential by using the console To create a credential from the multicluster engine operator console, complete the steps in the console. Start at the navigation menu. Click Credentials to choose from existing credential options. To enhance security and convenience, you can create a namespace specifically to host your credentials. Optional: You can add a Base DNS domain for your credential. If you add the base DNS domain, it is automatically populated in the correct field when you create a cluster with this credential. Add your Red Hat OpenStack Platform clouds.yaml file contents. The contents of the clouds.yaml file, including the password, provide the required information for connecting to the Red Hat OpenStack Platform server. The file contents must include the password, which you add to a new line immediately after the username . Add your Red Hat OpenStack Platform cloud name. This entry is the name specified in the cloud section of the clouds.yaml to use for establishing communication to the Red Hat OpenStack Platform server. Optional : For configurations that use an internal certificate authority, enter your certificate in the Internal CA certificate field to automatically update your clouds.yaml with the certificate information. For disconnected installations only: Complete the fields in the Configuration for disconnected installation subsection with the required information: Cluster OS image : This value contains the URL to the image to use for Red Hat OpenShift Container Platform cluster machines. Image content sources : This value contains the disconnected registry path. The path contains the hostname, port, and repository path to all of the installation images for disconnected installations. Example: repository.com:5000/openshift/ocp-release . The path creates an image content source policy mapping in the install-config.yaml to the Red Hat OpenShift Container Platform release images. As an example, repository.com:5000 produces this imageContentSource content: - mirrors: - registry.example.com:5000/ocp4 source: quay.io/openshift-release-dev/ocp-release-nightly - mirrors: - registry.example.com:5000/ocp4 source: quay.io/openshift-release-dev/ocp-release - mirrors: - registry.example.com:5000/ocp4 source: quay.io/openshift-release-dev/ocp-v4.0-art-dev Additional trust bundle : This value provides the contents of the certificate file that is required to access the mirror registry. Note: If you are deploying managed clusters from a hub that is in a disconnected environment, and want them to be automatically imported post install, add an Image Content Source Policy to the install-config.yaml file by using the YAML editor. A sample entry is shown in the following example: - mirrors: - registry.example.com:5000/rhacm2 source: registry.redhat.io/rhacm2 If you want to enable a proxy, enter the proxy information: HTTP proxy URL: The URL that should be used as a proxy for HTTP traffic. HTTPS proxy URL: The secure proxy URL that should be used for HTTPS traffic. If no value is provided, the same value as the HTTP Proxy URL is used for both HTTP and HTTPS . No proxy domains: A comma-separated list of domains that should bypass the proxy. Begin a domain name with a period . to include all of the subdomains that are in that domain. Add an asterisk * to bypass the proxy for all destinations. Additional trust bundle: One or more additional CA certificates that are required for proxying HTTPS connections. Enter your Red Hat OpenShift pull secret. See Download your Red Hat OpenShift pull secret to download your pull secret. Add your SSH Private Key and SSH Public Key, which allows you to connect to the cluster. You can use an existing key pair, or create a new one with key generation program. Click Create . Review the new credential information, then click Add . When you add the credential, it is added to the list of credentials. You can create a cluster that uses this credential by completing the steps in Creating a cluster on Red Hat OpenStack Platform . You can edit your credential in the console. When you are no longer managing a cluster that is using a credential, delete the credential to protect the information in the credential. Select Actions to delete in bulk, or select the options menu beside the credential that you want to delete. 1.5.5.3. Creating an opaque secret by using the API To create an opaque secret for Red Hat OpenStack Platform by using the API instead of the console, apply YAML content in the YAML preview window that is similar to the following example: kind: Secret metadata: name: <managed-cluster-name>-osp-creds namespace: <managed-cluster-namespace> type: Opaque data: clouds.yaml: USD(base64 -w0 "USD{OSP_CRED_YAML}") cloud: USD(echo -n "openstack" | base64 -w0) Notes: Opaque secrets are not visible in the console. Opaque secrets are created in the managed cluster namespace you chose. Hive uses the opaque secret to provision the cluster. When provisioning the cluster by using the Red Hat Advanced Cluster Management console, the credentials you previoulsy created are copied to the managed cluster namespace as the opaque secret. 1.5.5.4. Additional resources See Installing a cluster on OpenStack with customizations . Download your Red Hat OpenShift pull secret . See Generating a key pair for cluster node SSH access for more information. See Creating a cluster on Red Hat OpenStack Platform . Return to Creating a credential for Red Hat OpenStack . 1.5.6. Creating a credential for Red Hat OpenShift Cluster Manager Add an OpenShift Cluster Manager credential so that you can discover clusters. Required access: Administrator 1.5.6.1. Prerequisites You need access to a console.redhat.com account. Later you will need the value that can be obtained from console.redhat.com/openshift/token . 1.5.6.2. Managing a credential by using the console You need to add your credential to discover clusters. To create a credential from the multicluster engine operator console, complete the steps in the console. Start at the navigation menu. Click Credentials to choose from existing credential options. Tip: Create a namespace specifically to host your credentials, both for convenience and added security. Your OpenShift Cluster Manager API token can be obtained from console.redhat.com/openshift/token . You can edit your credential in the console. When you are no longer managing a cluster that is using a credential, delete the credential to protect the information in the credential. Select Actions to delete in bulk, or select the options menu beside the credential that you want to delete. If your credential is removed, or your OpenShift Cluster Manager API token expires or is revoked, then the associated discovered clusters are removed. 1.5.7. Creating a credential for Ansible Automation Platform You need a credential to use multicluster engine operator console to deploy and manage an Red Hat OpenShift Container Platform cluster that is using Red Hat Ansible Automation Platform. Required access: Edit Note: This procedure must be done before you can create an Automation template to enable automation on a cluster. 1.5.7.1. Prerequisites You must have the following prerequisites before creating a credential: A deployed multicluster engine operator hub cluster Internet access for your multicluster engine operator hub cluster Ansible login credentials, which includes Ansible Automation Platform hostname and OAuth token; see Credentials for Ansible Automation Platform . Account permissions that allow you to install hub clusters and work with Ansible. Learn more about Ansible users . 1.5.7.2. Managing a credential by using the console To create a credential from the multicluster engine operator console, complete the steps in the console. Start at the navigation menu. Click Credentials to choose from existing credential options. Tip: Create a namespace specifically to host your credentials, both for convenience and added security. The Ansible Token and host URL that you provide when you create your Ansible credential are automatically updated for the automations that use that credential when you edit the credential. The updates are copied to any automations that use that Ansible credential, including those related to cluster lifecycle, governance, and application management automations. This ensures that the automations continue to run after the credential is updated. You can edit your credential in the console. Ansible credentials are automatically updated in your automation that use that credential when you update them in the credential. You can create an Ansible Job that uses this credential by completing the steps in Configuring Ansible Automation Platform tasks to run on managed clusters . When you are no longer managing a cluster that is using a credential, delete the credential to protect the information in the credential. Select Actions to delete in bulk, or select the options menu beside the credential that you want to delete. 1.5.8. Creating a credential for an on-premises environment You need a credential to use the console to deploy and manage a Red Hat OpenShift Container Platform cluster in an on-premises environment. The credential specifies the connections that are used for the cluster. Required access: Edit Prerequisites Managing a credential by using the console 1.5.8.1. Prerequisites You need the following prerequisites before creating a credential: A hub cluster that is deployed. Internet access for your hub cluster so it can create the Kubernetes cluster on your infrastructure environment. For a disconnected environment, you must have a configured mirror registry where you can copy the release images for your cluster creation. See Disconnected installation mirroring in the OpenShift Container Platform documentation for more information. Account permissions that support installing clusters on the on-premises environment. 1.5.8.2. Managing a credential by using the console To create a credential from the console, complete the steps in the console. Start at the navigation menu. Click Credentials to choose from existing credential options. Tip: Create a namespace specifically to host your credentials, both for convenience and added security. Select Host inventory for your credential type. You can optionally add a Base DNS domain for your credential. If you add the base DNS domain to the credential, it is automatically populated in the correct field when you create a cluster with this credential. If you do not add the DNS domain, you can add it when you create your cluster. Enter your Red Hat OpenShift pull secret . This pull secret is automatically entered when you create a cluster and specify this credential. You can download your pull secret from Pull secret . See Using image pull secrets for more information about pull secrets. Enter your SSH public key . This SSH public key is also automatically entered when you create a cluster and specify this credential. Select Add to create your credential. You can create a cluster that uses this credential by completing the steps in Creating a cluster in an on-premises environment . When you are no longer managing a cluster that is using a credential, delete the credential to protect the information in the credential. Select Actions to delete in bulk, or select the options menu beside the credential that you want to delete. 1.6. Cluster lifecycle introduction The multicluster engine operator is the cluster lifecycle operator that provides cluster management capabilities for OpenShift Container Platform and Red Hat Advanced Cluster Management hub clusters. The multicluster engine operator is a software operator that enhances cluster fleet management and supports OpenShift Container Platform cluster lifecycle management across clouds and data centers. You can use multicluster engine operator with or without Red Hat Advanced Cluster Management. Red Hat Advanced Cluster Management also installs multicluster engine operator automatically and offers further multicluster capabilities. See the following documentation: Cluster lifecycle architecture Managing credentials overview Release images Specifying Release images Maintaining a custom list of release images while connected Maintaining a custom list of release images while disconnected Host inventory introduction Creating clusters Creating a cluster with the CLI Configuring additional manifests during cluster creation Creating a cluster on Amazon Web Services Creating a cluster on Amazon Web Services GovCloud Creating a cluster on Microsoft Azure Creating a cluster on Google Cloud Platform Creating a cluster on VMware vSphere Creating a cluster on Red Hat OpenStack Platform Creating a cluster in an on-premise environment Creating a cluster in a proxy environment Cluster import Importing a managed cluster by using the console Importing a managed cluster by using the CLI Specifying image registry on managed clusters for import Accessing your cluster Scaling managed clusters Hibernating a created cluster Upgrading your cluster Upgrading a disconnected cluster Enabling cluster proxy add-ons Configuring Ansible Automation Platform tasks to run on managed clusters ClusterClaims List existing ClusterClaims Create custom ClusterClaims ManagedClusterSets Creating a ManagedClusterSet Assigning RBAC permissions to a ManagedClusterSet Creating a ManagedClusterSetBinding resource Placing managed clusters by using taints and tolerations Removing a managed cluster from a ManagedClusterSet Placement Managing cluster pools (Technology Preview) Creating a cluster pool Claiming clusters from cluster pools Updating the cluster pool release image Scaling cluster pools Destroying a cluster pool Enabling ManagedServiceAccount Cluster lifecycle advanced configuration Removing a cluster from management 1.6.1. Cluster lifecycle architecture Cluster lifecycle requires two types of clusters: hub clusters and managed clusters . The hub cluster is the OpenShift Container Platform (or Red Hat Advanced Cluster Management) main cluster with the multicluster engine operator automatically installed. You can create, manage, and monitor other Kubernetes clusters with the hub cluster. You can create clusters by using the hub cluster, while you can also import existing clusters to be managed by the hub cluster. When you create a managed cluster, the cluster is created using the Red Hat OpenShift Container Platform cluster installer with the Hive resource. You can find more information about the process of installing clusters with the OpenShift Container Platform installer by reading Installing and configuring OpenShift Container Platform clusters in the OpenShift Container Platform documentation. The following diagram shows the components that are installed with the multicluster engine for Kubernetes operator for cluster management: The components of the cluster lifecycle management architecture include the following items: 1.6.1.1. Hub cluster The managed cluster import controller deploys the klusterlet operator to the managed clusters. The Hive controller provisions the clusters that you create by using the multicluster engine for Kubernetes operator. The Hive Controller also destroys managed clusters that were created by the multicluster engine for Kubernetes operator. The cluster curator controller creates the Ansible jobs as the pre-hook or post-hook to configure the cluster infrastructure environment when creating or upgrading managed clusters. When a managed cluster add-on is enabled on the hub cluster, its add-on hub controller is deployed on the hub cluster. The add-on hub controller deploys the add-on agent to the managed clusters. 1.6.1.2. Managed cluster The klusterlet operator deploys the registration and work controllers on the managed cluster. The Registration Agent registers the managed cluster and the managed cluster add-ons with the hub cluster. The Registration Agent also maintains the status of the managed cluster and the managed cluster add-ons. The following permissions are automatically created within the Clusterrole to allow the managed cluster to access the hub cluster: Allows the agent to get or update its owned cluster that the hub cluster manages Allows the agent to update the status of its owned cluster that the hub cluster manages Allows the agent to rotate its certificate Allows the agent to get or update the coordination.k8s.io lease Allows the agent to get its managed cluster add-ons Allows the agent to update the status of its managed cluster add-ons The work agent applies the Add-on Agent to the managed cluster. The permission to allow the managed cluster to access the hub cluster is automatically created within the Clusterrole and allows the agent to send events to the hub cluster. To continue adding and managing clusters, see the Cluster lifecycle introduction . 1.6.2. Release images When you build your cluster, use the version of Red Hat OpenShift Container Platform that the release image specifies. By default, OpenShift Container Platform uses the clusterImageSets resources to get the list of supported release images. Continue reading to learn more about release images: Specifying release images Maintaining a custom list of release images while connected Maintaining a custom list of release images while disconnected Synchronizing available release images 1.6.2.1. Specifying release images When you create a cluster on a provider by using multicluster engine for Kubernetes operator, specify a release image to use for your new cluster. To specify a release image, see the following topics: Locating ClusterImageSets Configuring ClusterImageSets Creating a release image to deploy a cluster on a different architecture 1.6.2.1.1. Locating ClusterImageSets The YAML files referencing the release images are maintained in the acm-hive-openshift-releases GitHub repository. The files are used to create the list of the available release images in the console. This includes the latest fast channel images from OpenShift Container Platform. The console only displays the latest release images for the three latest versions of OpenShift Container Platform. For example, you might see the following release image displayed in the console options: quay.io/openshift-release-dev/ocp-release:4.14.1-x86_64 The console displays the latest versions to help you create a cluster with the latest release images. If you need to create a cluster that is a specific version, older release image versions are also available. Note: You can only select images with the visible: 'true' label when creating clusters in the console. An example of this label in a ClusterImageSet resource is provided in the following content. Replace 4.x.1 with the current version of the product: apiVersion: hive.openshift.io/v1 kind: ClusterImageSet metadata: labels: channel: fast visible: 'true' name: img4.x.1-x86-64-appsub spec: releaseImage: quay.io/openshift-release-dev/ocp-release:4.x.1-x86_64 Additional release images are stored, but are not visible in the console. To view all of the available release images, run the following command: The repository has the clusterImageSets directory, which is the directory that you use when working with the release images. The clusterImageSets directory has the following directories: Fast: Contains files that reference the latest versions of the release images for each supported OpenShift Container Platform version. The release images in this folder are tested, verified, and supported. Releases: Contains files that reference all of the release images for each OpenShift Container Platform version (stable, fast, and candidate channels) Note: These releases have not all been tested and determined to be stable. Stable: Contains files that reference the latest two stable versions of the release images for each supported OpenShift Container Platform version.. Note: By default, the current list of release images updates one time every hour. After upgrading the product, it might take up to one hour for the list to reflect the recommended release image versions for the new version of the product. 1.6.2.1.2. Configuring ClusterImageSets You can configure your ClusterImageSets with the following options: Option 1: To create a cluster in the console, specify the image reference for the specific ClusterImageSet that you want to us. Each new entry you specify persists and is available for all future cluster provisions See the following example entry: Option 2: Manually create and apply a ClusterImageSets YAML file from the acm-hive-openshift-releases GitHub repository. Option 3: To enable automatic updates of ClusterImageSets from a forked GitHub repository, follow the README.md in the cluster-image-set-controller GitHub repository. 1.6.2.1.3. Creating a release image to deploy a cluster on a different architecture You can create a cluster on an architecture that is different from the architecture of the hub cluster by manually creating a release image that has the files for both architectures. For example, you might need to create an x86_64 cluster from a hub cluster that is running on the ppc64le , aarch64 , or s390x architecture. If you create the release image with both sets of files, the cluster creation succeeds because the new release image enables the OpenShift Container Platform release registry to provide a multi-architecture image manifest. OpenShift Container Platform supports multiple architectures by default. You can use the following clusterImageSet to provision a cluster. Replace 4.x.0 with the current supported version: apiVersion: hive.openshift.io/v1 kind: ClusterImageSet metadata: labels: channel: fast visible: 'true' name: img4.x.0-multi-appsub spec: releaseImage: quay.io/openshift-release-dev/ocp-release:4.x.0-multi To create the release image for OpenShift Container Platform images that do not support multiple architectures, complete steps similar to the following example for your architecture type: From the OpenShift Container Platform release registry , create a manifest list that includes x86_64 , s390x , aarch64 , and ppc64le release images. Pull the manifest lists for both architectures in your environment from the Quay repository by running the following example commands. Replace 4.x.1 with the current version of the product: Log in to your private repository where you maintain your images by running the following command. Replace <private-repo> with the path to your repository: Add the release image manifest to your private repository by running the following commands that apply to your environment. Replace 4.x.1 with the current version of the product. Replace <private-repo> with the path to your repository: Create a manifest for the new information by running the following command: Add references to both release images to the manifest list by running the following commands. Replace 4.x.1 with the current version of the product. Replace <private-repo> with the path to your repository: Merge the list in your manifest list with the existing manifest by running the following command. Replace <private-repo> with the path to your repository. Replace 4.x.1 with the current version: On the hub cluster, create a release image that references the manifest in your repository. Create a YAML file that contains information that is similar to the following example. Replace <private-repo> with the path to your repository. Replace 4.x.1 with the current version: apiVersion: hive.openshift.io/v1 kind: ClusterImageSet metadata: labels: channel: fast visible: "true" name: img4.x.1-appsub spec: releaseImage: <private-repo>/ocp-release:4.x.1 Run the following command on your hub cluster to apply the changes. Replace <file-name> with the name of the YAML file that you created in the step: Select the new release image when you create your OpenShift Container Platform cluster. If you deploy the managed cluster by using the Red Hat Advanced Cluster Management console, specify the architecture for the managed cluster in the Architecture field during the cluster creation process. The creation process uses the merged release images to create the cluster. 1.6.2.1.4. Additional resources See the acm-hive-openshift-releases GitHub repository for the YAML files that reference the release images. See the cluster-image-set-controller GitHub repository to learn how to enable enable automatic updates of ClusterImageSets resources from a forked GitHub repository. 1.6.2.2. Maintaining a custom list of release images when connected You might want to use the same release image for all of your clusters. To simplify, you can create your own custom list of release images that are available when creating a cluster. Complete the following steps to manage your available release images: Fork the acm-hive-openshift-releases GitHub . Add the YAML files for the images that you want available when you create a cluster. Add the images to the ./clusterImageSets/stable/ or ./clusterImageSets/fast/ directory by using the Git console or the terminal. Create a ConfigMap in the multicluster-engine namespace named cluster-image-set-git-repo . See the following example, but replace 2.x with 2.6: apiVersion: v1 kind: ConfigMap metadata: name: cluster-image-set-git-repo namespace: multicluster-engine data: gitRepoUrl: <forked acm-hive-openshift-releases repository URL> gitRepoBranch: backplane-<2.x> gitRepoPath: clusterImageSets channel: <fast or stable> You can retrieve the available YAML files from the main repository by merging changes in to your forked repository with the following procedure: Commit and merge your changes to your forked repository. To synchronize your list of fast release images after you clone the acm-hive-openshift-releases repository, update the value of channel field in the cluster-image-set-git-repo ConfigMap to fast . To synchronize and display the stable release images, update the value of channel field in the cluster-image-set-git-repo ConfigMap to stable . After updating the ConfigMap , the list of available stable release images updates with the currently available images in about one minute. You can use the following commands to list what is available and remove the defaults. Replace <clusterImageSet_NAME> with the correct name: View the list of currently available release images in the console when you are creating a cluster. For information regarding other fields available through the ConfigMap , view the cluster-image-set-controller GitHub repository README . 1.6.2.3. Maintaining a custom list of release images while disconnected In some cases, you need to maintain a custom list of release images when the hub cluster has no Internet connection. You can create your own custom list of release images that are available when creating a cluster. Complete the following steps to manage your available release images while disconnected: While you are on a connected system, navigate to the acm-hive-openshift-releases GitHub repository to access the cluster image sets that are available. Copy the clusterImageSets directory to a system that can access the disconnected multicluster engine operator cluster. Add the mapping between the managed cluster and the disconnected repository with your cluster image sets by completing the following steps that fits your managed cluster: For an OpenShift Container Platform managed cluster, see Configuring image registry repository mirroring for information about using your ImageContentSourcePolicy object to complete the mapping. For a managed cluster that is not an OpenShift Container Platform cluster, use the ManageClusterImageRegistry custom resource definition to override the location of the image sets. See Specifying registry images on managed clusters for import for information about how to override the cluster for the mapping. Add the YAML files for the images that you want available when you create a cluster by using the console or CLI to manually add the clusterImageSet YAML content. Modify the clusterImageSet YAML files for the remaining OpenShift Container Platform release images to reference the correct offline repository where you store the images. Your updates resemble the following example where spec.releaseImage uses your offline image registry of the release image, and the release image is referenced by digest: apiVersion: hive.openshift.io/v1 kind: ClusterImageSet metadata: labels: channel: fast name: img<4.x.x>-x86-64-appsub spec: releaseImage: IMAGE_REGISTRY_IPADDRESS_or__DNSNAME/REPO_PATH/ocp-release@sha256:073a4e46289be25e2a05f5264c8f1d697410db66b960c9ceeddebd1c61e58717 Ensure that the images are loaded in the offline image registry that is referenced in the YAML file. Obtain the image digest by running the following command: oc adm release info <tagged_openshift_release_image> | grep "Pull From" Replace <tagged_openshift_release_image> with the tagged image for the supported OpenShift Container Platform version. See the following example output: Pull From: quay.io/openshift-release-dev/ocp-release@sha256:69d1292f64a2b67227c5592c1a7d499c7d00376e498634ff8e1946bc9ccdddfe To learn more about the image tag and digest, see Referencing images in imagestreams . Create each of the clusterImageSets by entering the following command for each YAML file: Replace clusterImageSet_FILE with the name of the cluster image set file. For example: After running this command for each resource you want to add, the list of available release images are available. Alternately you can paste the image URL directly in the create cluster console. Adding the image URL creates new clusterImageSets if they do not exist. View the list of currently available release images in the console when you are creating a cluster. 1.6.2.4. Synchronizing available release images If you have the Red Hat Advanced Cluster Management hub cluster, which uses the MultiClusterHub operator to manage, upgrade, and install hub cluster components, you can synchronize the list of release images to ensure that you can select the latest available versions. Release images are available in the acm-hive-openshift-releases repository and are updated frequently. 1.6.2.4.1. Stability levels There are three levels of stability of the release images, as displayed in the following table: Table 1.4. Stability levels of release images Category Description candidate The most current images, which are not tested and might have some bugs. fast Images that are partially tested, but likely less stable than a stable version. stable These fully-tested images are confirmed to install and build clusters correctly. 1.6.2.4.2. Refreshing the release images list Complete the following steps to refresh and synchronize the list of images by using a Linux or Mac operating system: If the installer-managed acm-hive-openshift-releases subscription is enabled, disable the subscription by setting the value of disableUpdateClusterImageSets to true in the MultiClusterHub resource. Clone the acm-hive-openshift-releases GitHub repository. Remove the subscription by running the following command: oc delete -f subscribe/subscription-fast To synchronize and display the candidate release images, run the following command by using a Linux or Mac operating system: make subscribe-candidate After about one minute, the latest list of candidate release images is available. To synchronize and display the fast release images, run the following command: make subscribe-fast After about one minute, the latest list of fast release images is available. Connect to the stable release images and synchronize your Red Hat Advanced Cluster Management hub cluster. Run the following command using a Linux or Mac operating system: make subscribe-stable After about one minute, the list of available candidate , fast , and stable release images updates with the currently available images. View the list of currently available release images in the Red Hat Advanced Cluster Management console when you are creating a cluster. Unsubscribe from any of these channels to stop viewing the updates by running the following command: oc delete -f subscribe/subscription-fast 1.6.3. Host inventory introduction The host inventory management and on-premises cluster installation are available using the multicluster engine operator central infrastructure management feature. Central infrastructure management runs the Assisted Installer (also called infrastructure operator) as an operator on the hub cluster. You can use the console to create a host inventory, which is a pool of bare metal or virtual machines that you can use to create on-premises OpenShift Container Platform clusters. These clusters can be standalone, with dedicated machines for the control plane, or hosted control planes , where the control plane runs as pods on a hub cluster. You can install standalone clusters by using the console, API, or GitOps by using Zero Touch Provisioning (ZTP). See Installing GitOps ZTP in a disconnected environment in the Red Hat OpenShift Container Platform documentation for more information on ZTP. A machine joins the host inventory after booting with a Discovery Image. The Discovery Image is a Red Hat CoreOS live image that contains the following: An agent that performs discovery, validation, and installation tasks. The necessary configuration for reaching the service on the hub cluster, including the endpoint, token, and static network configuration, if applicable. You generally have a single Discovery Image for each infrastructure environment, which is a set of hosts sharing a common set of properties. The InfraEnv custom resource definition represents this infrastructure environment and associated Discovery Image. The image used is based on your OpenShift Container Platform version, which determines the operating system version that is selected. After the host boots and the agent contacts the service, the service creates a new Agent custom resource on the hub cluster representing that host. The Agent resources make up the host inventory. You can install hosts in the inventory as OpenShift nodes later. The agent writes the operating system to the disk, along with the necessary configuration, and reboots the host. Note: Red Hat Advanced Cluster Management 2.9 and later and central infrastructure management support the Nutanix platform by using AgentClusterInstall , which requires additional configuration by creating the Nutanix virtual machines. To learn more, see Optional: Installing on Nutanix in the Assisted Installer documentation. Continue reading to learn more about host inventories and central infrastructure management: Enabling the central infrastructure management service Enabling central infrastructure management on Amazon Web Services Creating a host inventory by using the console Creating a host inventory by using the command line interface Configuring advanced networking for an infrastructure environment Adding hosts to the host inventory by using the Discovery Image Automatically adding bare metal hosts to the host inventory Managing your host inventory Creating a cluster in an on-premises environment 1.6.3.1. Enabling the central infrastructure management service The central infrastructure management service is provided with the multicluster engine operator and deploys OpenShift Container Platform clusters. Central infrastructure management is deployed automatically when you enable the MultiClusterHub Operator on the hub cluster, but you have to enable the service manually. See the following sections: Creating a bare metal host custom resource definition Creating or modifying the Provisioning resource Enabling central infrastructure management in disconnected environments Enabling central infrastructure management in connected environments Installing a FIPS-enabled cluster by using the Infrastructure Operator for Red Hat OpenShift 1.6.3.1.1. Prerequisites See the following prerequisites before enabling the central infrastructure management service: You must have a deployed hub cluster on a supported OpenShift Container Platform version and a supported Red Hat Advanced Cluster Management for Kubernetes version. You need internet access for your hub cluster (connected), or a connection to an internal or mirror registry that has a connection to the internet (disconnected) to retrieve the required images for creating the environment. You must open the required ports for bare metal provisioning. See Ensuring required ports are open in the OpenShift Container Platform documentation. You need a bare metal host custom resource definition. You need an OpenShift Container Platform pull secret . See Using image pull secrets for more information. You need a configured default storage class. For disconnected environments only, complete the procedure for Clusters at the network far edge in the OpenShift Container Platform documentation. 1.6.3.1.2. Creating a bare metal host custom resource definition You need a bare metal host custom resource definition before enabling the central infrastructure management service. Check if you already have a bare metal host custom resource definition by running the following command: oc get crd baremetalhosts.metal3.io If you have a bare metal host custom resource definition, the output shows the date when the resource was created. If you do not have the resource, you receive an error that resembles the following: Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "baremetalhosts.metal3.io" not found If you do not have a bare metal host custom resource definition, download the metal3.io_baremetalhosts.yaml file and apply the content by running the following command to create the resource: oc apply -f 1.6.3.1.3. Creating or modifying the Provisioning resource You need a Provisioning resource before enabling the central infrastructure management service. Check if you have the Provisioning resource by running the following command: oc get provisioning If you already have a Provisioning resource, continue by Modifying the Provisioning resource . If you do not have a Provisioning resource, you receive a No resources found error. Continue by Creating the Provisioning resource . 1.6.3.1.3.1. Modifying the Provisioning resource If you already have a Provisioning resource, you must modify the resource if your hub cluster is installed on one of the following platforms: Bare metal Red Hat OpenStack Platform VMware vSphere User-provisioned infrastructure (UPI) method and the platform is None If your hub cluster is installed on a different platform, continue at Enabling central infrastructure management in disconnected environments or Enabling central infrastructure management in connected environments . Modify the Provisioning resource to allow the Bare Metal Operator to watch all namespaces by running the following command: oc patch provisioning provisioning-configuration --type merge -p '{"spec":{"watchAllNamespaces": true }}' 1.6.3.1.3.2. Creating the Provisioning resource If you do not have a Provisioning resource, complete the following steps: Create the Provisioning resource by adding the following YAML content: apiVersion: metal3.io/v1alpha1 kind: Provisioning metadata: name: provisioning-configuration spec: provisioningNetwork: "Disabled" watchAllNamespaces: true Apply the content by running the following command: oc apply -f 1.6.3.1.4. Enabling central infrastructure management in disconnected environments To enable central infrastructure management in disconnected environments, complete the following steps: Create a ConfigMap in the same namespace as your infrastructure operator to specify the values for ca-bundle.crt and registries.conf for your mirror registry. Your file ConfigMap might resemble the following example: apiVersion: v1 kind: ConfigMap metadata: name: <mirror-config> namespace: multicluster-engine labels: app: assisted-service data: ca-bundle.crt: | <certificate-content> registries.conf: | unqualified-search-registries = ["registry.access.redhat.com", "docker.io"] [[registry]] prefix = "" location = "registry.redhat.io/multicluster-engine" mirror-by-digest-only = true [[registry.mirror]] location = "mirror.registry.com:5000/multicluster-engine" Note: You must set mirror-by-digest-only to true because release images are specified by using a digest. Registries in the list of unqualified-search-registries are automatically added to an authentication ignore list in the PUBLIC_CONTAINER_REGISTRIES environment variable. The specified registries do not require authentication when the pull secret of the managed cluster is validated. Write the key pairs representing the headers and query parameters that you want to send with every osImage request. If you don't need both parameters, write key pairs for only headers or query parameters. Important: Headers and query parameters are only encrypted if you use HTTPS. Make sure to use HTTPS to avoid security issues. Create a file named headers and add content that resembles the following example: { "Authorization": "Basic xyz" } Create a file named query_params and add content that resembles the following example: { "api_key": "myexampleapikey", } Create a secret from the parameter files that you created by running the following command. If you only created one parameter file, remove the argument for the file that you didn't create: oc create secret generic -n multicluster-engine os-images-http-auth --from-file=./query_params --from-file=./headers If you want to use HTTPS osImages with a self-signed or third-party CA certificate, add the certificate to the image-service-additional-ca ConfigMap . To create a certificate, run the following command: oc -n multicluster-engine create configmap image-service-additional-ca --from-file=tls.crt Create the AgentServiceConfig custom resource by saving the following YAML content in the agent_service_config.yaml file: apiVersion: agent-install.openshift.io/v1beta1 kind: AgentServiceConfig metadata: name: agent spec: databaseStorage: accessModes: - ReadWriteOnce resources: requests: storage: <db_volume_size> filesystemStorage: accessModes: - ReadWriteOnce resources: requests: storage: <fs_volume_size> mirrorRegistryRef: name: <mirror_config> 1 unauthenticatedRegistries: - <unauthenticated_registry> 2 imageStorage: accessModes: - ReadWriteOnce resources: requests: storage: <img_volume_size> 3 OSImageAdditionalParamsRef: name: os-images-http-auth OSImageCACertRef: name: image-service-additional-ca osImages: - openshiftVersion: "<ocp_version>" 4 version: "<ocp_release_version>" 5 url: "<iso_url>" 6 cpuArchitecture: "x86_64" + 1 Replace mirror_config with the name of the ConfigMap that contains your mirror registry configuration details. 2 Include the optional unauthenticated_registry parameter if you are using a mirror registry that does not require authentication. Entries on this list are not validated or required to have an entry in the pull secret. 3 Replace img_volume_size with the size of the volume for the imageStorage field, for example 10Gi per operating system image. The minimum value is 10Gi , but the recommended value is at least 50Gi . This value specifies how much storage is allocated for the images of the clusters. You need to allow 1 GB of image storage for each instance of Red Hat Enterprise Linux CoreOS that is running. You might need to use a higher value if there are many clusters and instances of Red Hat Enterprise Linux CoreOS. 4 Replace ocp_version with the OpenShift Container Platform version to install, for example, 4.14 . 5 Replace ocp_release_version with the specific install version, for example, 49.83.202103251640-0 . 6 Replace iso_url with the ISO url, for example, https://mirror.openshift.com/pub/openshift-v4/x86_64/dependencies/rhcos/4.13/4.13.3/rhcos-4.13.3-x86_64-live.x86_64.iso . You can find other values at the rhoc . If you are using HTTPS osImages with self-signed or third-party CA certificates, reference the certificate in the OSImageCACertRef spec. Important: If you are using the late binding feature and the spec.osImages releases in the AgentServiceConfig custom resource are version 4.13 or later, the OpenShift Container Platform release images that you use when creating your clusters must be the same. The Red Hat Enterprise Linux CoreOS images for version 4.13 and later are not compatible with earlier images. You can verify that your central infrastructure management service is healthy by checking the assisted-service and assisted-image-service deployments and ensuring that their pods are ready and running. 1.6.3.1.5. Enabling central infrastructure management in connected environments To enable central infrastructure management in connected environments, create the AgentServiceConfig custom resource by saving the following YAML content in the agent_service_config.yaml file: apiVersion: agent-install.openshift.io/v1beta1 kind: AgentServiceConfig metadata: name: agent spec: databaseStorage: accessModes: - ReadWriteOnce resources: requests: storage: <db_volume_size> 1 filesystemStorage: accessModes: - ReadWriteOnce resources: requests: storage: <fs_volume_size> 2 imageStorage: accessModes: - ReadWriteOnce resources: requests: storage: <img_volume_size> 3 1 Replace db_volume_size with the volume size for the databaseStorage field, for example 10Gi . This value specifies how much storage is allocated for storing files such as database tables and database views for the clusters. The minimum value that is required is 1Gi . You might need to use a higher value if there are many clusters. 2 Replace fs_volume_size with the size of the volume for the filesystemStorage field, for example 200M per cluster and 2-3Gi per supported OpenShift Container Platform version. The minimum value that is required is 1Gi , but the recommended value is at least 100Gi . This value specifies how much storage is allocated for storing logs, manifests, and kubeconfig files for the clusters. You might need to use a higher value if there are many clusters. 3 Replace img_volume_size with the size of the volume for the imageStorage field, for example 10Gi per operating system image. The minimum value is 10Gi , but the recommended value is at least 50Gi . This value specifies how much storage is allocated for the images of the clusters. You need to allow 1 GB of image storage for each instance of Red Hat Enterprise Linux CoreOS that is running. You might need to use a higher value if there are many clusters and instances of Red Hat Enterprise Linux CoreOS. Your central infrastructure management service is configured. You can verify that it is healthy by checking the assisted-service and assisted-image-service deployments and ensuring that their pods are ready and running. 1.6.3.1.6. Installing a FIPS-enabled cluster by using the Infrastructure Operator for Red Hat OpenShift When you install a OpenShift Container Platform cluster version 4.15 and earlier that is in FIPS mode, you must specify that the installers run Red Hat Enterprise Linux (RHEL) version 8 in the AgentServiceConfig resource. Required access: You must have access to edit the AgentServiceConfig and AgentClusterInstall resources. Complete the following steps to update the AgentServiceConfig resource: Log in to you managed cluster by using the following command: oc login Add the agent-install.openshift.io/service-image-base: el8 annotation in the AgentServiceConfig resource. Your AgentServiceConfig resource might resemble the following YAML: apiVersion: agent-install.openshift.io/v1beta1 kind: AgentServiceConfig metadata: annotations: agent-install.openshift.io/service-image-base: el8 ... 1.6.3.1.7. Additional resources For additional information about zero touch provisioning, see Clusters at the network far edge in the OpenShift Container Platform documentation. See Using image pull secrets 1.6.3.2. Enabling central infrastructure management on Amazon Web Services If you are running your hub cluster on Amazon Web Services and want to enable the central infrastructure management service, complete the following steps after Enabling the central infrastructure management service : Make sure you are logged in at the hub cluster and find the unique domain configured on the assisted-image-service by running the following command: Your domain might resemble the following example: assisted-image-service-multicluster-engine.apps.<yourdomain>.com Make sure you are logged in at the hub cluster and create a new IngressController with a unique domain using the NLB type parameter. See the following example: apiVersion: operator.openshift.io/v1 kind: IngressController metadata: name: ingress-controller-with-nlb namespace: openshift-ingress-operator spec: domain: nlb-apps.<domain>.com routeSelector: matchLabels: router-type: nlb endpointPublishingStrategy: type: LoadBalancerService loadBalancer: scope: External providerParameters: type: AWS aws: type: NLB Add <yourdomain> to the domain parameter in IngressController by replacing <domain> in nlb-apps.<domain>.com with <yourdomain> . Apply the new IngressController by running the following command: Make sure that the value of the spec.domain parameter of the new IngressController is not in conflict with an existing IngressController by completing the following steps: List all IngressControllers by running the following command: Run the following command on each of the IngressControllers , except the ingress-controller-with-nlb that you just created: If the spec.domain report is missing, add a default domain that matches all of the routes that are exposed in the cluster except nlb-apps.<domain>.com . If the spec.domain report is provided, make sure that the nlb-apps.<domain>.com route is excluded from the specified range. Run the following command to edit the assisted-image-service route to use the nlb-apps location: The default namespace is where you installed the multicluster engine operator. Add the following lines to the assisted-image-service route: metadata: labels: router-type: nlb name: assisted-image-service In the assisted-image-service route, find the URL value of spec.host . The URL might resemble the following example: Replace apps in the URL with nlb-apps to match the domain configured in the new IngressController . To verify that the central infrastructure management service is enabled on Amazon Web Services, run the following command to verify that the pods are healthy: Create a new host inventory and ensure that the download URL uses the new nlb-apps URL. 1.6.3.3. Creating a host inventory by using the console You can create a host inventory (infrastructure environment) to discover physical or virtual machines that you can install your OpenShift Container Platform clusters on. 1.6.3.3.1. Prerequisites You must enable the central infrastructure management service. See Enabling the central infrastructure management service for more information. 1.6.3.3.2. Creating a host inventory Complete the following steps to create a host inventory by using the console: From the console, navigate to Infrastructure > Host inventory and click Create infrastructure environment . Add the following information to your host inventory settings: Name: A unique name for your infrastructure environment. Creating an infrastructure environment by using the console also creates a new namespace for the InfraEnv resource with the name you chose. If you create InfraEnv resources by using the command line interface and want to monitor the resources in the console, use the same name for your namespace and the InfraEnv . Network type: Specifies if the hosts you add to your infrastructure environment use DHCP or static networking. Static networking configuration requires additional steps. Location: Specifies the geographic location of the hosts. The geographic location can be used to define which data center the hosts are located. Labels: Optional field where you can add labels to the hosts that are discovered with this infrastructure environment. The specified location is automatically added to the list of labels. Infrastructure provider credentials: Selecting an infrastructure provider credential automatically populates the pull secret and SSH public key fields with information in the credential. For more information, see Creating a credential for an on-premises environment . Pull secret: Your OpenShift Container Platform pull secret that enables you to access the OpenShift Container Platform resources. This field is automatically populated if you selected an infrastructure provider credential. SSH public key: The SSH key that enables the secure communication with the hosts. You can use it to connect to the host for troubleshooting. After installing a cluster, you can no longer connect to the host with the SSH key. The key is generally in your id_rsa.pub file. The default file path is ~/.ssh/id_rsa.pub . This field is automatically populated if you selected an infrastructure provider credential that contains the value of a SSH public key. If you want to enable proxy settings for your hosts, select the setting to enable it and enter the following information: HTTP Proxy URL: The URL of the proxy for HTTP requests. HTTPS Proxy URL: The URL of the proxy for HTTP requests. The URL must start with HTTP. HTTPS is not supported. If you do not provide a value, your HTTP proxy URL is used by default for both HTTP and HTTPS connections. No Proxy domains: A list of domains separated by commas that you do not want to use the proxy with. Start a domain name with a period ( . ) to include all of the subdomains that are in that domain. Add an asterisk ( * ) to bypass the proxy for all destinations. Optionally add your own Network Time Protocol (NTP) sources by providing a comma separated list of IP or domain names of the NTP pools or servers. If you need advanced configuration options that are not available in the console, continue to Creating a host inventory by using the command line interface . If you do not need advanced configuration options, you can continue by configuring static networking, if required, and begin adding hosts to your infrastructure environment. 1.6.3.3.3. Accessing a host inventory To access a host inventory, select Infrastructure > Host inventory in the console. Select your infrastructure environment from the list to view the details and hosts. 1.6.3.3.4. Additional resources See Enabling the central infrastructure management service See Creating a credential for an on-premises environment See Creating a host inventory by using the command line interface If you completed this procedure as part of the process to configure hosted control planes on bare metal, your steps are to complete the following procedures: Adding hosts to the host inventory by using the Discovery Image Automatically adding bare metal hosts to the host inventory 1.6.3.4. Creating a host inventory by using the command line interface You can create a host inventory (infrastructure environment) to discover physical or virtual machines that you can install your OpenShift Container Platform clusters on. Use the command line interface instead of the console for automated deployments or for the following advanced configuration options: Automatically bind discovered hosts to an existing cluster definition Override the ignition configuration of the Discovery Image Control the iPXE behavior Modify kernel arguments for the Discovery Image Pass additional certificates that you want the host to trust during the discovery phase Select a Red Hat CoreOS version to boot for testing that is not the default option of the newest version 1.6.3.4.1. Prerequisite You must enable the central infrastructure management service. See Enabling the central infrastructure management service for more information. 1.6.3.4.2. Creating a host inventory Complete the following steps to create a host inventory (infrastructure environment) by using the command line interface: Log in to your hub cluster by running the following command: Create a namespace for your resource. Create the namespace.yaml file and add the following content: apiVersion: v1 kind: Namespace metadata: name: <your_namespace> 1 1 Use the same name for your namespace and your infrastructure environment to monitor your inventory in the console. Apply the YAML content by running the following command: Create a Secret custom resource containing your OpenShift Container Platform pull secret . Create the pull-secret.yaml file and add the following content: apiVersion: v1 kind: Secret type: kubernetes.io/dockerconfigjson metadata: name: pull-secret 1 namespace: <your_namespace> stringData: .dockerconfigjson: <your_pull_secret> 2 1 Add your namesapce. 2 Add your pull secret. Apply the YAML content by running the following command: Create the infrastructure environment. Create the infra-env.yaml file and add the following content. Replace values where needed: apiVersion: agent-install.openshift.io/v1beta1 kind: InfraEnv metadata: name: myinfraenv namespace: <your_namespace> spec: proxy: httpProxy: <http://user:password@ipaddr:port> httpsProxy: <http://user:password@ipaddr:port> noProxy: additionalNTPSources: sshAuthorizedKey: pullSecretRef: name: <name> agentLabels: <key>: <value> nmStateConfigLabelSelector: matchLabels: <key>: <value> clusterRef: name: <cluster_name> namespace: <project_name> ignitionConfigOverride: '{"ignition": {"version": "3.1.0"}, ...}' cpuArchitecture: x86_64 ipxeScriptType: DiscoveryImageAlways kernelArguments: - operation: append value: audit=0 additionalTrustBundle: <bundle> osImageVersion: <version> Table 1.5. InfraEnv field table Field Optional or required Description proxy Optional Defines the proxy settings for agents and clusters that use the InfraEnv resource. If you do not set the proxy value, agents are not configured to use a proxy. httpProxy Optional The URL of the proxy for HTTP requests. The URL must start with http . HTTPS is not supported.. httpsProxy Optional The URL of the proxy for HTTP requests. The URL must start with http . HTTPS is not supported. noProxy Optional A list of domains and CIDRs separated by commas that you do not want to use the proxy with. additionalNTPSources Optional A list of Network Time Protocol (NTP) sources (hostname or IP) to add to all hosts. They are added to NTP sources that are configured by using other options, such as DHCP. sshAuthorizedKey Optional SSH public keys that are added to all hosts for use in debugging during the discovery phase. The discovery phase is when the host boots the Discovery Image. name Required The name of the Kubernetes secret containing your pull secret. agentLabels Optional Labels that are automatically added to the Agent resources representing the hosts that are discovered with your InfraEnv . Make sure to add your key and value. nmStateConfigLabelSelector Optional Consolidates advanced network configuration such as static IPs, bridges, and bonds for the hosts. The host network configuration is specified in one or more NMStateConfig resources with labels you choose. The nmStateConfigLabelSelector property is a Kubernetes label selector that matches your chosen labels. The network configuration for all NMStateConfig labels that match this label selector is included in the Discovery Image. When you boot, each host compares each configuration to its network interfaces and applies the appropriate configuration. To learn more about advanced network configuration, see link to section Configuring advanced networking for a host inventory . clusterRef Optional References an existing ClusterDeployment resource that describes a standalone on-premises cluster. Not set by default. If clusterRef is not set, then the hosts can be bound to one or more clusters later. You can remove the host from one cluster and add it to another. If clusterRef is set, then all hosts discovered with your InfraEnv are automatically bound to the specified cluster. If the cluster is not installed yet, then all discovered hosts are part of its installation. If the cluster is already installed, then all discovered hosts are added. ignitionConfigOverride Optional Modifies the ignition configuration of the Red Hat CoreOS live image, such as adding files. Make sure to only use ignitionConfigOverride if you need it. Must use ignition version 3.1.0, regardless of the cluster version. cpuArchitecture Optional Choose one of the following supported CPU architectures: x86_64, aarch64, ppc64le, or s390x. The default value is x86_64. ipxeScriptType Optional Causes the image service to always serve the iPXE script when set to the default value of DiscoveryImageAlways and when you are using iPXE to boot. As a result, the host boots from the network discovery image. Setting the value to BootOrderControl causes the image service to decide when to return the iPXE script, depending on the host state, which causes the host to boot from the disk when the host is provisioned and is part of a cluster. kernelArguments Optional Allows modifying the kernel arguments for when the Discovery Image boots. Possible values for operation are append , replace , or delete . additionalTrustBundle Optional A PEM-encoded X.509 certificate bundle, usually needed if the hosts are in a network with a re-encrypting man-in-the-middle (MITM) proxy, or if the hosts need to trust certificates for other purposes, such as container image registries. Hosts discovered by your InfraEnv trust the certificates in this bundle. Clusters created from the hosts discovered by your InfraEnv also trust the certificates in this bundle. osImageVersion Optional The Red Hat CoreOS image version to use for your InfraEnv . Make sure the version refers to the OS image specified in either the AgentServiceConfig.spec.osImages or in the default OS images list. Each release has a specific set of Red Hat CoreOS image versions. The OSImageVersion must match an OpenShift Container Platform version in the OS images list. You cannot specify OSImageVersion and ClusterRef at the same time. If you want to use another version of the Red Hat CoreOS image that does not exist by default, then you must manually add the version by specifying it in the AgentServiceConfig.spec.osImages . To learn more about adding versions, see Enabling the central infrastructure management service . Apply the YAML content by running the following command: To verify that your host inventory is created, check the status with the following command: See the following list of notable properties: conditions : The standard Kubernetes conditions indicating if the image was created succesfully. isoDownloadURL : The URL to download the Discovery Image. createdTime : The time at which the image was last created. If you modify the InfraEnv , make sure that the timestamp has been updated before downloading a new image. Note: If you modify the InfraEnv resource, make sure that the InfraEnv has created a new Discovery Image by looking at the createdTime property. If you already booted hosts, boot them again with the latest Discovery Image. You can continue by configuring static networking, if required, and begin adding hosts to your infrastructure environment. 1.6.3.4.3. Additional resources See Enabling the central infrastructure management service . 1.6.3.5. Configuring advanced networking for an infrastructure environment For hosts that require networking beyond DHCP on a single interface, you must configure advanced networking. The required configuration includes creating one or more instances of the NMStateConfig resource that describes the networking for one or more hosts. Each NMStateConfig resource must contain a label that matches the nmStateConfigLabelSelector on your InfraEnv resource. See Creating a host inventory by using the command line interface to learn more about the nmStateConfigLabelSelector . The Discovery Image contains the network configurations defined in all referenced NMStateConfig resources. After booting, each host compares each configuration to its network interfaces and applies the appropriate configuration. 1.6.3.5.1. Prerequisites You must enable the central infrastructure management service. You must create a host inventory. 1.6.3.5.2. Configuring advanced networking by using the command line interface To configure advanced networking for your infrastructure environment by using the command line interface, complete the following steps: Create a file named nmstateconfig.yaml and add content that is similar to the following template. Replace values where needed: apiVersion: agent-install.openshift.io/v1beta1 kind: NMStateConfig metadata: name: mynmstateconfig namespace: <your-infraenv-namespace> labels: some-key: <some-value> spec: config: interfaces: - name: eth0 type: ethernet state: up mac-address: 02:00:00:80:12:14 ipv4: enabled: true address: - ip: 192.168.111.30 prefix-length: 24 dhcp: false - name: eth1 type: ethernet state: up mac-address: 02:00:00:80:12:15 ipv4: enabled: true address: - ip: 192.168.140.30 prefix-length: 24 dhcp: false dns-resolver: config: server: - 192.168.126.1 routes: config: - destination: 0.0.0.0/0 -hop-address: 192.168.111.1 -hop-interface: eth1 table-id: 254 - destination: 0.0.0.0/0 -hop-address: 192.168.140.1 -hop-interface: eth1 table-id: 254 interfaces: - name: "eth0" macAddress: "02:00:00:80:12:14" - name: "eth1" macAddress: "02:00:00:80:12:15" Table 1.6. NMStateConfig field table Field Optional or required Description name Required Use a name that is relevant to the host or hosts you are configuring. namespace Required The namespace must match the namespace of your InfraEnv resource. some-key Required Add one or more labels that match the nmStateConfigLabelSelector on your InfraEnv resource. config Optional Describes the network settings in NMstate format. See Declarative Network API for the format specification and additional examples. The configuration can also apply to a single host, where you have one NMStateConfig resource per host, or can describe the interfaces for multiple hosts in a single NMStateConfig resource. interfaces Optional Describes the mapping between interface names found in the specified NMstate configuration and MAC addresses found on the hosts. Make sure the mapping uses physical interfaces present on a host. For example, when the NMState configuration defines a bond or VLAN, the mapping only contains an entry for parent interfaces. The mapping has the following purposes: * Allows you to use interface names in the configuration that do not match the interface names on a host. You might find this useful because the operating system chooses the interface names, which might not be predictable. * Tells a host what MAC addresses to look for after booting and applies the correct NMstate configuration. Note: The Image Service automatically creates a new image when you update any InfraEnv properties or change the NMStateConfig resources that match its label selector. If you add NMStateConfig resources after creating the InfraEnv resource, make sure that the InfraEnv creates a new Discovery Image by checking the createdTime property in your InfraEnv . If you already booted hosts, boot them again with the latest Discovery Image. Apply the YAML content by running the following command: 1.6.3.5.3. Additional resources See Creating a host inventory by using the command line interface See Declarative Network API 1.6.3.6. Adding hosts to the host inventory by using the Discovery Image After you create your host inventory (infrastructure environment), you can discover your hosts and add them to your inventory. To add hosts to your inventory, choose a method to download an ISO file and attach it to each server. For example, you can download ISO files by using a virtual media, or by writing the ISO file to a USB drive. Important: To prevent the installation from failing, keep the Discovery ISO media connected to the device during the installation process, and set each host to boot from the device one time. Prerequisites Adding hosts by using the console Adding hosts by using the command line interface 1.6.3.6.1. Prerequisites You must enable the central infrastructure management service. See Enabling the central infrastructure management service for more information. You must create a host inventory. See Creating a host inventory by using the console for more information. 1.6.3.6.2. Adding hosts by using the console Download the ISO file by completing the following steps: Select Infrastructure > Host inventory in the console. Select your infrastructure environment from the list. Click Add hosts and select With Discovery ISO . Approve each host so that you can use it. You can select hosts from the inventory table by clicking Actions and selecting Approve . 1.6.3.6.3. Adding hosts by using the command line interface The URL to download the ISO file in the isoDownloadURL property is in the status of your InfraEnv resource. See Creating a host inventory by using the command line interface for more information about the InfraEnv resource. Each booted host creates an Agent resource in the same namespace. Run the following command to view the download URL in the InfraEnv custom resource: oc get infraenv -n <infra env namespace> <infra env name> -o jsonpath='{.status.isoDownloadURL}' See the following output: Note: By default, the ISO that is provided is a minimal ISO. The minimal ISO does not contain the root file system, RootFS . The RootFS is downloaded later. To display full ISO, replace minimal.iso in the URL with full.iso . Use the URL to download the ISO file and boot your hosts with the ISO file. , you need to approve each host. See the following procedure: Run the following command to list all of your Agents : oc get agent -n <infra env namespace> You get an output that is similar to the following output: Approve any Agent from the list with a false approval status. Run the following command: oc patch agent -n <infra env namespace> <agent name> -p '{"spec":{"approved":true}}' --type merge Run the following command to confirm approval status: oc get agent -n <infra env namespace> You get an output that is similar to the following output with a true value: 1.6.3.6.4. Additional resources See Enabling the central infrastructure management service See Creating a host inventory by using the console See Creating a host inventory by using the command line interface 1.6.3.7. Automatically adding bare metal hosts to the host inventory After creating your infrastructure environment, you can discover your hosts and add them to your host inventory. You can automate booting the Discovery Image of your infrastructure environment by making the bare metal operator communicate with the Baseboard Management Controller (BMC) of each bare metal host. Create a BareMetalHost resource and associated BMC secret for each host. The automation is initiated by a label on the BareMetalHost that references your infrastructure environment. The automation performs the following actions: Boots each bare metal host with the Discovery Image represented by the infrastructure environment Reboots each host with the latest Discovery Image in case the infrastructure environment or any associated network configurations is updated Associates each Agent resource with its corresponding BareMetalHost resource upon discovery Updates Agent resource properties based on information from the BareMetalHost , such as hostname, role, and installation disk Approves the Agent for use as a cluster node 1.6.3.7.1. Prerequisites You must enable the central infrastructure management service. You must create a host inventory. 1.6.3.7.2. Adding bare metal hosts by using the console Complete the following steps to automatically add bare metal hosts to your host inventory by using the console: Select Infrastructure > Host inventory in the console. Select your infrastructure environment from the list. Click Add hosts and select With BMC Form . Add the required information and click Create . To learn more about BMC address formatting, see BMC addressing in the additional resources section. 1.6.3.7.3. Adding bare metal hosts by using the command line interface Complete the following steps to automatically add bare metal hosts to your host inventory by using the command line interface. Create a BMC secret by applying the following YAML content and replacing values where needed: apiVersion: v1 kind: Secret metadata: name: <bmc-secret-name> namespace: <your_infraenv_namespace> 1 type: Opaque data: username: <username> password: <password> 1 The namespace must be the same as the namespace of your InfraEnv . Create a bare metal host by applying the following YAML content and replacing values where needed: apiVersion: metal3.io/v1alpha1 kind: BareMetalHost metadata: name: <bmh-name> namespace: <your-infraenv-namespace> 1 annotations: inspect.metal3.io: disabled bmac.agent-install.openshift.io/hostname: <hostname> 2 bmac.agent-install.openshift.io/role: <role> 3 labels: infraenvs.agent-install.openshift.io: <your-infraenv> 4 spec: online: true automatedCleaningMode: disabled 5 bootMACAddress: <your-mac-address> 6 bmc: address: <machine-address> 7 credentialsName: <bmc-secret-name> 8 rootDeviceHints: deviceName: /dev/sda 9 1 The namespace must be the same as the namespace of your InfraEnv . 2 Optional: Replace with the name of your host. 3 Optional: Possible values are master or worker . 4 The name must match the name of your InfrEnv and exist in the same namespace. 5 If you do not set a value, the metadata value is automatically used. 6 Make sure the MAC address matches the MAC address of one of your host interfaces. 7 Use the address of the BMC. To learn more, see Port access for the out-of-band management IP address and BMC addressing in the additional resources section. 8 Make sure that the credentialsName value matches the name of the BMC secret you created. 9 Optional: Select the installation disk. See The BareMetalHost spec for the available root device hints. After the host is booted with the Discovery Image and the corresponding Agent resource is created, the installation disk is set according to this hint. After turning on the host, the image starts downloading. This might take a few minutes. When the host is discovered, an Agent custom resource is created automatically. 1.6.3.7.4. Disabling converged flow Converged flow is enabled by default. If your hosts do not appear, you might need to temporarily disable converged flow. To disable converged flow, complete the following steps: Create the following config map on your hub cluster: apiVersion: v1 kind: ConfigMap metadata: name: my-assisted-service-config namespace: multicluster-engine data: ALLOW_CONVERGED_FLOW: "false" Note: When you set ALLOW_CONVERGED_FLOW to "false" , you also disable any features enabled by the Ironic Python Agent. Apply the config map by running the following command: oc annotate --overwrite AgentServiceConfig agent unsupported.agent-install.openshift.io/assisted-service-configmap=my-assisted-service-config 1.6.3.7.5. Removing managed cluster nodes by using the command line interface To remove managed cluster nodes from a managed cluster, you need a hub cluster that is running on a supported OpenShift Container Platform version. Any static networking configuration required for the node to boot must be available. Make sure to not delete NMStateConfig resources when you delete the agent and bare metal host. 1.6.3.7.5.1. Removing managed cluster nodes with a bare metal host If you have a bare metal host on your hub cluster and want remove managed cluster nodes from a managed cluster, complete the following steps: Add the following annotation to the BareMetalHost resource of the node that you want to delete: Delete the BareMetalHost resource by running the following command. Replace <bmh-name> with the name of your BareMetalHost : 1.6.3.7.5.2. Removing managed cluster nodes without a bare metal host If you do not have a bare metal host on your hub cluster and you want to remove managed cluster nodes from a managed cluster, you can unbind the agent by removing the clusterDeploymentName field from the Agent specification, or delete the Agent custom resource that corresponds with the node that you are removing. If you want to delete an Agent resource from the hub cluster, but do not want the node removed from the managed cluster, you can set the annotation agent.agent-install.openshift.io/skip-spoke-cleanup to true on the Agent resource before you remove it. See the Deleting nodes instructions in the OpenShift Container Platform documentation. 1.6.3.7.6. Binding and unbinding hosts You can bind hosts to an Red Hat OpenShift Container Platform cluster by setting the spec.clusterDeploymentName field in the Agent custom resource, or by setting the {}bmac.agent-install.openshift.io/cluster-reference bare metal host annotation. The {}bmac.agent-install.openshift.io/cluster-reference bare metal host annotation controls the connection to your OpenShift Container Platform cluster, and binds or unbinds hosts to a specific cluster. You can use the {}bmac.agent-install.openshift.io/cluster-reference annotation in one of the following three ways: If you do not set the annotation in the bare metal host, no changes apply to the host. If you set the annotation with an empty string value, the host unbinds. If you set the annotation and use a string value that follows the <cluster-namespace>/<cluster-name> format, the host binds to the cluster that your ClusterDeployment custom resource represents. Note: If the InfraEnv that the host belongs to already contains a cluster-reference annotation, the {}bmac.agent-install.openshift.io/cluster-reference annotation is ignored. 1.6.3.7.7. Additional resources For additional information about zero touch provisioning, see Clusters at the network far edge in the OpenShift Container Platform documentation. To learn about the required ports for using a bare metal host, see Port access for the out-of-band management IP address in the OpenShift Container Platform documentation. To learn about root device hints, see Bare metal configuration in the OpenShift Container Platform documentation. See Using image pull secrets See Creating a credential for an on-premises environment To learn more about scaling compute machines, see Manually scaling a compute machine set in the OpenShift Container Platform documentation. To learn more about converged flow, see Managed cluster stuck in Pending status after deployment . To learn more about BMC format addressing, see BMC addressing in the OpenShift Container Platform documentation. 1.6.3.8. Managing your host inventory You can manage your host inventory and edit existing hosts by using the console, or by using the command line interface and editing the Agent resource. 1.6.3.8.1. Managing your host inventory by using the console Each host that you successfully boot with the Discovery ISO appears as a row in your host inventory. You can use the console to edit and manage your hosts. If you booted the host manually and are not using the bare metal operator automation, you must approve the host in the console before you can use it. Hosts that are ready to be installed as OpenShift nodes have the Available status. 1.6.3.8.2. Managing your host inventory by using the command line interface An Agent resource represents each host. You can set the following properties in an Agent resource: clusterDeploymentName Set this property to the namespace and name of the ClusterDeployment you want to use if you want to install the host as a node in a cluster. Optional: role Sets the role for the host in the cluster. Possible values are master , worker , and auto-assign . The default value is auto-assign . hostname Sets the host name for the host. Optional if the host is automatically assigned a valid host name, for example by using DHCP. approved Indicates if the host can be installed as an OpenShift node. This property is a boolean with a default value of False . If you booted the host manually and are not using the bare metal operator automation, you must set this property to True before installing the host. installation_disk_id The ID of the installation disk you chose that is visible in the inventory of the host. installerArgs A JSON-formatted string containing overrides for the coreos-installer arguments of the host. You can use this property to modify kernel arguments. See the following example syntax: ignitionConfigOverrides A JSON-formatted string containing overrides for the ignition configuration of the host. You can use this property to add files to the host by using ignition. See the following example syntax: nodeLabels A list of labels that are applied to the node after the host is installed. The status of an Agent resource has the following properties: role Sets the role for the host in the cluster. If you previously set a role in the Agent resource, the value appears in the status . inventory Contains host properties that the agent running on the host discovers. progress The host installation progress. ntpSources The configured Network Time Protocol (NTP) sources of the host. conditions Contains the following standard Kubernetes conditions with a True or False value: SpecSynced: True if all specified properties are successfully applied. False if some error was encountered. Connected: True if the agent connection to the installation service is not obstructed. False if the agent has not contacted the installation service in some time. RequirementsMet: True if the host is ready to begin the installation. Validated: True if all host validations pass. Installed: True if the host is installed as an OpenShift node. Bound: True if the host is bound to a cluster. Cleanup: False if the request to delete the Agent resouce fails. debugInfo Contains URLs for downloading installation logs and events. validationsInfo Contains information about validations that the agent runs after the host is discovered to ensure that the installation is successful. Troubleshoot if the value is False . installation_disk_id The ID of the installation disk you chose that is visible in the inventory of the host. 1.6.3.8.3. Additional resources See Accessing a host inventory See coreos-installer install 1.6.4. Cluster creation Learn how to create Red Hat OpenShift Container Platform clusters across cloud providers with multicluster engine operator. multicluster engine operator uses the Hive operator that is provided with OpenShift Container Platform to provision clusters for all providers except the on-premises clusters and hosted control planes. When provisioning the on-premises clusters, multicluster engine operator uses the central infrastructure management and Assisted Installer function that are provided with OpenShift Container Platform. The hosted clusters for hosted control planes are provisioned by using the HyperShift operator. Configuring additional manifests during cluster creation Creating a cluster on Amazon Web Services Creating a cluster on Amazon Web Services GovCloud Creating a cluster on Microsoft Azure Creating a cluster on Google Cloud Platform Creating a cluster on VMware vSphere Creating a cluster on Red Hat OpenStack Platform Creating a cluster in an on-premises environment Creating a cluster in a proxy environment Configuring AgentClusterInstall proxy Hosted control planes 1.6.4.1. Creating a cluster with the CLI The multicluster engine for Kubernetes operator uses internal Hive components to create Red Hat OpenShift Container Platform clusters. See the following information to learn how to create clusters. Prerequisites Create a cluster with ClusterDeployment Create a cluster with cluster pool 1.6.4.1.1. Prerequisites Before creating a cluster, you must clone the clusterImageSets repository and apply it to your hub cluster. See the following steps: Run the following command to clone, but replace 2.x with 2.6: Run the following command to apply it to your hub cluster: Select the Red Hat OpenShift Container Platform release images when you create a cluster. Note: If you use the Nutanix platform, be sure to use x86_64 architecture for the releaseImage in the ClusterImageSet resource and set the visible label value to 'true' . See the following example: apiVersion: hive.openshift.io/v1 kind: ClusterImageSet metadata: labels: channel: stable visible: 'true' name: img4.x.47-x86-64-appsub spec: releaseImage: quay.io/openshift-release-dev/ocp-release:4.x.47-x86_64 1.6.4.1.2. Create a cluster with ClusterDeployment A ClusterDeployment is a Hive custom resource that is used to control the lifecycle of a cluster. Follow the Using Hive documentation to create the ClusterDeployment custom resource and create an individual cluster. 1.6.4.1.3. Create a cluster with ClusterPool A ClusterPool is also a Hive custom resource that is used to create multiple clusters. Follow the Cluster Pools documentation to create a cluster with the Hive ClusterPool API. 1.6.4.2. Configuring additional manifests during cluster creation You can configure additional Kubernetes resource manifests during the installation process of creating your cluster. This can help if you need to configure additional manifests for scenarios such as configuring networking or setting up a load balancer. 1.6.4.2.1. Prerequisite Add a reference to the ClusterDeployment resource that specifies a config map resource that contains the additional resource manifests. Note: The ClusterDeployment resource and the config map must be in the same namespace. 1.6.4.2.2. Configuring additional manifests during cluster creation by using examples If you want to configure additional manifests by using a config map with resource manifests, complete the following steps: Create a YAML file and add the following example content: kind: ConfigMap apiVersion: v1 metadata: name: <my-baremetal-cluster-install-manifests> namespace: <mynamespace> data: 99_metal3-config.yaml: | kind: ConfigMap apiVersion: v1 metadata: name: metal3-config namespace: openshift-machine-api data: http_port: "6180" provisioning_interface: "enp1s0" provisioning_ip: "172.00.0.3/24" dhcp_range: "172.00.0.10,172.00.0.100" deploy_kernel_url: "http://172.00.0.3:6180/images/ironic-python-agent.kernel" deploy_ramdisk_url: "http://172.00.0.3:6180/images/ironic-python-agent.initramfs" ironic_endpoint: "http://172.00.0.3:6385/v1/" ironic_inspector_endpoint: "http://172.00.0.3:5150/v1/" cache_url: "http://192.168.111.1/images" rhcos_image_url: "https://releases-art-rhcos.svc.ci.openshift.org/art/storage/releases/rhcos-4.3/43.81.201911192044.0/x86_64/rhcos-43.81.201911192044.0-openstack.x86_64.qcow2.gz" Note: The example ConfigMap contains a manifest with another ConfigMap resource. The resource manifest ConfigMap can contain multiple keys with resource configurations added in the following pattern, data.<resource_name>\.yaml . Apply the file by running the following command: oc apply -f <filename>.yaml If you want to configure additional manifests by using a ClusterDeployment by referencing a resource manifest ConfigMap , complete the following steps: Create a YAML file and add the following example content. The resource manifest ConfigMap is referenced in spec.provisioning.manifestsConfigMapRef : apiVersion: hive.openshift.io/v1 kind: ClusterDeployment metadata: name: <my-baremetal-cluster> namespace: <mynamespace> annotations: hive.openshift.io/try-install-once: "true" spec: baseDomain: test.example.com clusterName: <my-baremetal-cluster> controlPlaneConfig: servingCertificates: {} platform: baremetal: libvirtSSHPrivateKeySecretRef: name: provisioning-host-ssh-private-key provisioning: installConfigSecretRef: name: <my-baremetal-cluster-install-config> sshPrivateKeySecretRef: name: <my-baremetal-hosts-ssh-private-key> manifestsConfigMapRef: name: <my-baremetal-cluster-install-manifests> imageSetRef: name: <my-clusterimageset> sshKnownHosts: - "10.1.8.90 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXvVVVKUYVkuyvkuygkuyTCYTytfkufTYAAAAIbmlzdHAyNTYAAABBBKWjJRzeUVuZs4yxSy4eu45xiANFIIbwE3e1aPzGD58x/NX7Yf+S8eFKq4RrsfSaK2hVJyJjvVIhUsU9z2sBJP8=" pullSecretRef: name: <my-baremetal-cluster-pull-secret> Apply the file by running the following command: oc apply -f <filename>.yaml 1.6.4.3. Creating a cluster on Amazon Web Services You can use the multicluster engine operator console to create a Red Hat OpenShift Container Platform cluster on Amazon Web Services (AWS). When you create a cluster, the creation process uses the OpenShift Container Platform installer with the Hive resource. If you have questions about cluster creation after completing this procedure, see Installing on AWS in the OpenShift Container Platform documentation for more information about the process. Prerequisites Creating your AWS cluster Creating your cluster with the console 1.6.4.3.1. Prerequisites See the following prerequisites before creating a cluster on AWS: You must have a deployed hub cluster. You need an AWS credential. See Creating a credential for Amazon Web Services for more information. You need a configured domain in AWS. See Configuring an AWS account for instructions on how to configure a domain. You must have Amazon Web Services (AWS) login credentials, which include user name, password, access key ID, and secret access key. See Understanding and Getting Your Security Credentials . You must have an OpenShift Container Platform image pull secret. See Using image pull secrets . Note: If you change your cloud provider access key on the cloud provider, you also need to manually update the corresponding credential for the cloud provider on the console. This is required when your credentials expire on the cloud provider where the managed cluster is hosted and you try to delete the managed cluster. 1.6.4.3.2. Creating your AWS cluster See the following important information about creating an AWS cluster: When you review your information and optionally customize it before creating the cluster, you can select YAML: On to view the install-config.yaml file content in the panel. You can edit the YAML file with your custom settings, if you have any updates. When you create a cluster, the controller creates a namespace for the cluster and the resources. Ensure that you include only resources for that cluster instance in that namespace. Destroying the cluster deletes the namespace and all of the resources in it. If you want to add your cluster to an existing cluster set, you must have the correct permissions on the cluster set to add it. If you do not have cluster-admin privileges when you are creating the cluster, you must select a cluster set on which you have clusterset-admin permissions. If you do not have the correct permissions on the specified cluster set, the cluster creation fails. Contact your cluster administrator to provide you with clusterset-admin permissions to a cluster set if you do not have any cluster set options to select. Every managed cluster must be associated with a managed cluster set. If you do not assign the managed cluster to a ManagedClusterSet , it is automatically added to the default managed cluster set. If there is already a base DNS domain that is associated with the selected credential that you configured with your AWS account, that value is populated in the field. You can change the value by overwriting it. This name is used in the hostname of the cluster. The release image identifies the version of the OpenShift Container Platform image that is used to create the cluster. Select the image from the list of images that are available. If the image that you want to use is not available, you can enter the URL to the image that you want to use. The node pools include the control plane pool and the worker pools. The control plane nodes share the management of the cluster activity. The information includes the following fields: Region: Specify the region where you want the node pool. CPU architecture: If the architecture type of the managed cluster is not the same as the architecture of your hub cluster, enter a value for the instruction set architecture of the machines in the pool. Valid values are amd64 , ppc64le , s390x , and arm64 . Zones: Specify where you want to run your control plane pools. You can select multiple zones within the region for a more distributed group of control plane nodes. A closer zone might provide faster performance, but a more distant zone might be more distributed. Instance type: Specify the instance type for your control plane node. You can change the type and size of your instance after it is created. Root storage: Specify the amount of root storage to allocate for the cluster. You can create zero or more worker nodes in a worker pool to run the container workloads for the cluster. This can be in a single worker pool, or distributed across multiple worker pools. If zero worker nodes are specified, the control plane nodes also function as worker nodes. The optional information includes the following fields: Zones: Specify where you want to run your worker pools. You can select multiple zones within the region for a more distributed group of nodes. A closer zone might provide faster performance, but a more distant zone might be more distributed. Instance type: Specify the instance type of your worker pools. You can change the type and size of your instance after it is created. Node count: Specify the node count of your worker pool. This setting is required when you define a worker pool. Root storage: Specify the amount of root storage allocated for your worker pool. This setting is required when you define a worker pool. Networking details are required for your cluster, and multiple networks are required for using IPv6. You can add an additional network by clicking Add network . Proxy information that is provided in the credential is automatically added to the proxy fields. You can use the information as it is, overwrite it, or add the information if you want to enable a proxy. The following list contains the required information for creating a proxy: HTTP proxy: Specify the URL that should be used as a proxy for HTTP traffic. HTTPS proxy: Specify the secure proxy URL that should be used for HTTPS traffic. If no value is provided, the same value as the HTTP Proxy URL is used for both HTTP and HTTPS . No proxy sites: A comma-separated list of sites that should bypass the proxy. Begin a domain name with a period . to include all of the subdomains that are in that domain. Add an asterisk * to bypass the proxy for all destinations. Additional trust bundle: One or more additional CA certificates that are required for proxying HTTPS connections. 1.6.4.3.3. Creating your cluster with the console To create a new cluster, see the following procedure. If you have an existing cluster that you want to import instead, see Cluster import . Note: You do not have to run the oc command that is provided with the cluster details to import the cluster. When you create the cluster, it is automatically configured under the management of multicluster engine operator. Navigate to Infrastructure > Clusters . On the Clusters page. Click Cluster > Create cluster and complete the steps in the console. Optional: Select YAML: On to view content updates as you enter the information in the console. If you need to create a credential, see Creating a credential for Amazon Web Services for more information. The name of the cluster is used in the hostname of the cluster. If you are using Red Hat Advanced Cluster Management for Kubernetes and want to configure your managed cluster klusterlet to run on specific nodes, see Optional: Configuring the klusterlet to run on specific nodes for the required steps. 1.6.4.3.4. Additional resources The AWS private configuration information is used when you are creating an AWS GovCloud cluster. See Creating a cluster on Amazon Web Services GovCloud for information about creating a cluster in that environment. See Configuring an AWS account for more information. See Release images for more information about release images. Find more information about supported instant types by visiting your cloud provider sites, such as AWS General purpose instances . 1.6.4.4. Creating a cluster on Amazon Web Services GovCloud You can use the console to create a Red Hat OpenShift Container Platform cluster on Amazon Web Services (AWS) or on AWS GovCloud. This procedure explains how to create a cluster on AWS GovCloud. See Creating a cluster on Amazon Web Services for the instructions for creating a cluster on AWS. AWS GovCloud provides cloud services that meet additional requirements that are necessary to store government documents on the cloud. When you create a cluster on AWS GovCloud, you must complete additional steps to prepare your environment. When you create a cluster, the creation process uses the OpenShift Container Platform installer with the Hive resource. If you have questions about cluster creation after completing this procedure, see Installing a cluster on AWS into a government region in the OpenShift Container Platform documentation for more information about the process. The following sections provide the steps for creating a cluster on AWS GovCloud: Prerequisites Configure Hive to deploy on AWS GovCloud Creating your cluster with the console 1.6.4.4.1. Prerequisites You must have the following prerequisites before creating an AWS GovCloud cluster: You must have AWS login credentials, which include user name, password, access key ID, and secret access key. See Understanding and Getting Your Security Credentials . You need an AWS credential. See Creating a credential for Amazon Web Services for more information. You need a configured domain in AWS. See Configuring an AWS account for instructions on how to configure a domain. You must have an OpenShift Container Platform image pull secret. See Using image pull secrets . You must have an Amazon Virtual Private Cloud (VPC) with an existing Red Hat OpenShift Container Platform cluster for the hub cluster. This VPC must be different from the VPCs that are used for the managed cluster resources or the managed cluster service endpoints. You need a VPC where the managed cluster resources are deployed. This cannot be the same as the VPCs that are used for the hub cluster or the managed cluster service endpoints. You need one or more VPCs that provide the managed cluster service endpoints. This cannot be the same as the VPCs that are used for the hub cluster or the managed cluster resources. Ensure that the IP addresses of the VPCs that are specified by Classless Inter-Domain Routing (CIDR) do not overlap. You need a HiveConfig custom resource that references a credential within the Hive namespace. This custom resource must have access to create resources on the VPC that you created for the managed cluster service endpoints. Note: If you change your cloud provider access key on the cloud provider, you also need to manually update the corresponding credential for the cloud provider on the multicluster engine operator console. This is required when your credentials expire on the cloud provider where the managed cluster is hosted and you try to delete the managed cluster. 1.6.4.4.2. Configure Hive to deploy on AWS GovCloud While creating a cluster on AWS GovCloud is almost identical to creating a cluster on standard AWS, you have to complete some additional steps to prepare an AWS PrivateLink for the cluster on AWS GovCloud. 1.6.4.4.2.1. Create the VPCs for resources and endpoints As listed in the prerequisites, two VPCs are required in addition to the VPC that contains the hub cluster. See Create a VPC in the Amazon Web Services documentation for specific steps for creating a VPC. Create a VPC for the managed cluster with private subnets. Create one or more VPCs for the managed cluster service endpoints with private subnets. Each VPC in a region has a limit of 255 VPC endpoints, so you need multiple VPCs to support more than 255 clusters in that region. For each VPC, create subnets in all of the supported availability zones of the region. Each subnet must have at least 255 usable IP addresses because of the controller requirements. The following example shows how you might structure subnets for VPCs that have 6 availability zones in the us-gov-east-1 region: Ensure that all of the hub environments (hub cluster VPCs) have network connectivity to the VPCs that you created for VPC endpoints that use peering, transit gateways, and that all DNS settings are enabled. Collect a list of VPCs that are needed to resolve the DNS setup for the AWS PrivateLink, which is required for the AWS GovCloud connectivity. This includes at least the VPC of the multicluster engine operator instance that you are configuring, and can include the list of all of the VPCs where various Hive controllers exist. 1.6.4.4.2.2. Configure the security groups for the VPC endpoints Each VPC endpoint in AWS has a security group attached to control access to the endpoint. When Hive creates a VPC endpoint, it does not specify a security group. The default security group of the VPC is attached to the VPC endpoint. The default security group of the VPC must have rules to allow traffic where VPC endpoints are created from the Hive installer pods. See Control access to VPC endpoints using endpoint policies in the AWS documentation for details. For example, if Hive is running in hive-vpc(10.1.0.0/16) , there must be a rule in the default security group of the VPC where the VPC endpoint is created that allows ingress from 10.1.0.0/16 . 1.6.4.4.2.3. Set permissions for AWS PrivateLink You need multiple credentials to configure the AWS PrivateLink. The required permissions for these credentials depend on the type of credential. The credentials for ClusterDeployment require the following permissions: The credentials for HiveConfig for endpoint VPCs account .spec.awsPrivateLink.credentialsSecretRef require the following permissions: The credentials specified in the HiveConfig custom resource for associating VPCs to the private hosted zone ( .spec.awsPrivateLink.associatedVPCs[USDidx].credentialsSecretRef ). The account where the VPC is located requires the following permissions: Ensure that there is a credential secret within the Hive namespace on the hub cluster. The HiveConfig custom resource needs to reference a credential within the Hive namespace that has permissions to create resources in a specific provided VPC. If the credential that you are using to provision an AWS cluster in AWS GovCloud is already in the Hive namespace, then you do not need to create another one. If the credential that you are using to provision an AWS cluster in AWS GovCloud is not already in the Hive namespace, you can either replace your current credential or create an additional credential in the Hive namespace. The HiveConfig custom resource needs to include the following content: An AWS GovCloud credential that has the required permissions to provision resources for the given VPC. The addresses of the VPCs for the OpenShift Container Platform cluster installation, as well as the service endpoints for the managed cluster. Best practice: Use different VPCs for the OpenShift Container Platform cluster installation and the service endpoints. The following example shows the credential content: spec: awsPrivateLink: ## The list of inventory of VPCs that can be used to create VPC ## endpoints by the controller. endpointVPCInventory: - region: us-east-1 vpcID: vpc-1 subnets: - availabilityZone: us-east-1a subnetID: subnet-11 - availabilityZone: us-east-1b subnetID: subnet-12 - availabilityZone: us-east-1c subnetID: subnet-13 - availabilityZone: us-east-1d subnetID: subnet-14 - availabilityZone: us-east-1e subnetID: subnet-15 - availabilityZone: us-east-1f subnetID: subnet-16 - region: us-east-1 vpcID: vpc-2 subnets: - availabilityZone: us-east-1a subnetID: subnet-21 - availabilityZone: us-east-1b subnetID: subnet-22 - availabilityZone: us-east-1c subnetID: subnet-23 - availabilityZone: us-east-1d subnetID: subnet-24 - availabilityZone: us-east-1e subnetID: subnet-25 - availabilityZone: us-east-1f subnetID: subnet-26 ## The credentialsSecretRef references a secret with permissions to create. ## The resources in the account where the inventory of VPCs exist. credentialsSecretRef: name: <hub-account-credentials-secret-name> ## A list of VPC where various mce clusters exists. associatedVPCs: - region: region-mce1 vpcID: vpc-mce1 credentialsSecretRef: name: <credentials-that-have-access-to-account-where-MCE1-VPC-exists> - region: region-mce2 vpcID: vpc-mce2 credentialsSecretRef: name: <credentials-that-have-access-to-account-where-MCE2-VPC-exists> You can include a VPC from all the regions where AWS PrivateLink is supported in the endpointVPCInventory list. The controller selects a VPC that meets the requirements for the ClusterDeployment. For more information, refer to the Hive documentation . 1.6.4.4.3. Creating your cluster with the console To create a cluster from the console, navigate to Infrastructure > Clusters > Create cluster AWS > Standalone and complete the steps in the console. Note: This procedure is for creating a cluster. If you have an existing cluster that you want to import, see Cluster import for those steps. The credential that you select must have access to the resources in an AWS GovCloud region, if you create an AWS GovCloud cluster. You can use an AWS GovCloud secret that is already in the Hive namespace if it has the required permissions to deploy a cluster. Existing credentials are displayed in the console. If you need to create a credential, see Creating a credential for Amazon Web Services for more information. The name of the cluster is used in the hostname of the cluster. Important: When you create a cluster, the controller creates a namespace for the cluster and its resources. Ensure that you include only resources for that cluster instance in that namespace. Destroying the cluster deletes the namespace and all of the resources in it. Tip: Select YAML: On to view content updates as you enter the information in the console. If you want to add your cluster to an existing cluster set, you must have the correct permissions on the cluster set to add it. If you do not have cluster-admin privileges when you are creating the cluster, you must select a cluster set on which you have clusterset-admin permissions. If you do not have the correct permissions on the specified cluster set, the cluster creation fails. Contact your cluster administrator to provide you with clusterset-admin permissions to a cluster set if you do not have any cluster set options to select. Every managed cluster must be associated with a managed cluster set. If you do not assign the managed cluster to a ManagedClusterSet , it is automatically added to the default managed cluster set. If there is already a base DNS domain that is associated with the selected credential that you configured with your AWS or AWS GovCloud account, that value is populated in the field. You can change the value by overwriting it. This name is used in the hostname of the cluster. See Configuring an AWS account for more information. The release image identifies the version of the OpenShift Container Platform image that is used to create the cluster. If the version that you want to use is available, you can select the image from the list of images. If the image that you want to use is not a standard image, you can enter the URL to the image that you want to use. See Release images for more information about release images. The node pools include the control plane pool and the worker pools. The control plane nodes share the management of the cluster activity. The information includes the following fields: Region: The region where you create your cluster resources. If you are creating a cluster on an AWS GovCloud provider, you must include an AWS GovCloud region for your node pools. For example, us-gov-west-1 . CPU architecture: If the architecture type of the managed cluster is not the same as the architecture of your hub cluster, enter a value for the instruction set architecture of the machines in the pool. Valid values are amd64 , ppc64le , s390x , and arm64 . Zones: Specify where you want to run your control plane pools. You can select multiple zones within the region for a more distributed group of control plane nodes. A closer zone might provide faster performance, but a more distant zone might be more distributed. Instance type: Specify the instance type for your control plane node, which must be the same as the CPU architecture that you previously indicated. You can change the type and size of your instance after it is created. Root storage: Specify the amount of root storage to allocate for the cluster. You can create zero or more worker nodes in a worker pool to run the container workloads for the cluster. They can be in a single worker pool, or distributed across multiple worker pools. If zero worker nodes are specified, the control plane nodes also function as worker nodes. The optional information includes the following fields: Pool name: Provide a unique name for your pool. Zones: Specify where you want to run your worker pools. You can select multiple zones within the region for a more distributed group of nodes. A closer zone might provide faster performance, but a more distant zone might be more distributed. Instance type: Specify the instance type of your worker pools. You can change the type and size of your instance after it is created. Node count: Specify the node count of your worker pool. This setting is required when you define a worker pool. Root storage: Specify the amount of root storage allocated for your worker pool. This setting is required when you define a worker pool. Networking details are required for your cluster, and multiple networks are required for using IPv6. For an AWS GovCloud cluster, enter the values of the block of addresses of the Hive VPC in the Machine CIDR field. You can add an additional network by clicking Add network . Proxy information that is provided in the credential is automatically added to the proxy fields. You can use the information as it is, overwrite it, or add the information if you want to enable a proxy. The following list contains the required information for creating a proxy: HTTP proxy URL: Specify the URL that should be used as a proxy for HTTP traffic. HTTPS proxy URL: Specify the secure proxy URL that should be used for HTTPS traffic. If no value is provided, the same value as the HTTP Proxy URL is used for both HTTP and HTTPS . No proxy domains: A comma-separated list of domains that should bypass the proxy. Begin a domain name with a period . to include all of the subdomains that are in that domain. Add an asterisk * to bypass the proxy for all destinations. Additional trust bundle: One or more additional CA certificates that are required for proxying HTTPS connections. When creating an AWS GovCloud cluster or using a private environment, complete the fields on the AWS private configuration page with the AMI ID and the subnet values. Ensure that the value of spec:platform:aws:privateLink:enabled is set to true in the ClusterDeployment.yaml file, which is automatically set when you select Use private configuration . When you review your information and optionally customize it before creating the cluster, you can select YAML: On to view the install-config.yaml file content in the panel. You can edit the YAML file with your custom settings, if you have any updates. Note: You do not have to run the oc command that is provided with the cluster details to import the cluster. When you create the cluster, it is automatically configured under the management of multicluster engine for Kubernetes operator. If you are using Red Hat Advanced Cluster Management for Kubernetes and want to configure your managed cluster klusterlet to run on specific nodes, see Optional: Configuring the klusterlet to run on specific nodes for the required steps. Continue with Accessing your cluster for instructions for accessing your cluster. 1.6.4.5. Creating a cluster on Microsoft Azure You can use the multicluster engine operator console to deploy a Red Hat OpenShift Container Platform cluster on Microsoft Azure or on Microsoft Azure Government. When you create a cluster, the creation process uses the OpenShift Container Platform installer with the Hive resource. If you have questions about cluster creation after completing this procedure, see Installing on Azure in the OpenShift Container Platform documentation for more information about the process. Prerequisites Creating your cluster with the console 1.6.4.5.1. Prerequisites See the following prerequisites before creating a cluster on Azure: You must have a deployed hub cluster. You need an Azure credential. See Creating a credential for Microsoft Azure for more information. You need a configured domain in Azure or Azure Government. See Configuring a custom domain name for an Azure cloud service for instructions on how to configure a domain. You need Azure login credentials, which include user name and password. See the Microsoft Azure Portal . You need Azure service principals, which include clientId , clientSecret , and tenantId . See azure.microsoft.com . You need an OpenShift Container Platform image pull secret. See Using image pull secrets . Note: If you change your cloud provider access key on the cloud provider, you also need to manually update the corresponding credential for the cloud provider on the console of multicluster engine operator. This is required when your credentials expire on the cloud provider where the managed cluster is hosted and you try to delete the managed cluster. 1.6.4.5.2. Creating your cluster with the console To create a cluster from the multicluster engine operator console, navigate to Infrastructure > Clusters . On the Clusters page, click Create cluster and complete the steps in the console. Note: This procedure is for creating a cluster. If you have an existing cluster that you want to import, see Cluster import for those steps. If you need to create a credential, see Creating a credential for Microsoft Azure for more information. The name of the cluster is used in the hostname of the cluster. Important: When you create a cluster, the controller creates a namespace for the cluster and its resources. Ensure that you include only resources for that cluster instance in that namespace. Destroying the cluster deletes the namespace and all of the resources in it. Tip: Select YAML: On to view content updates as you enter the information in the console. If you want to add your cluster to an existing cluster set, you must have the correct permissions on the cluster set to add it. If you do not have cluster-admin privileges when you are creating the cluster, you must select a cluster set on which you have clusterset-admin permissions. If you do not have the correct permissions on the specified cluster set, the cluster creation fails. Contact your cluster administrator to provide you with clusterset-admin permissions to a cluster set if you do not have any cluster set options to select. Every managed cluster must be associated with a managed cluster set. If you do not assign the managed cluster to a ManagedClusterSet , it is automatically added to the default managed cluster set. If there is already a base DNS domain that is associated with the selected credential that you configured for your Azure account, that value is populated in that field. You can change the value by overwriting it. See Configuring a custom domain name for an Azure cloud service for more information. This name is used in the hostname of the cluster. The release image identifies the version of the OpenShift Container Platform image that is used to create the cluster. If the version that you want to use is available, you can select the image from the list of images. If the image that you want to use is not a standard image, you can enter the URL to the image that you want to use. See Release images for more information about release images. The Node pools include the control plane pool and the worker pools. The control plane nodes share the management of the cluster activity. The information includes the following optional fields: Region: Specify a region where you want to run your node pools. You can select multiple zones within the region for a more distributed group of control plane nodes. A closer zone might provide faster performance, but a more distant zone might be more distributed. CPU architecture: If the architecture type of the managed cluster is not the same as the architecture of your hub cluster, enter a value for the instruction set architecture of the machines in the pool. Valid values are amd64 , ppc64le , s390x , and arm64 . You can change the type and size of the Instance type and Root storage allocation (required) of your control plane pool after your cluster is created. You can create one or more worker nodes in a worker pool to run the container workloads for the cluster. They can be in a single worker pool, or distributed across multiple worker pools. If zero worker nodes are specified, the control plane nodes also function as worker nodes. The information includes the following fields: Zones: Specifies here you want to run your worker pools. You can select multiple zones within the region for a more distributed group of nodes. A closer zone might provide faster performance, but a more distant zone might be more distributed. Instance type: You can change the type and size of your instance after it is created. You can add an additional network by clicking Add network . You must have more than one network if you are using IPv6 addresses. Proxy information that is provided in the credential is automatically added to the proxy fields. You can use the information as it is, overwrite it, or add the information if you want to enable a proxy. The following list contains the required information for creating a proxy: HTTP proxy: The URL that should be used as a proxy for HTTP traffic. HTTPS proxy: The secure proxy URL that should be used for HTTPS traffic. If no value is provided, the same value as the HTTP Proxy URL is used for both HTTP and HTTPS . No proxy: A comma-separated list of domains that should bypass the proxy. Begin a domain name with a period . to include all of the subdomains that are in that domain. Add an asterisk * to bypass the proxy for all destinations. Additional trust bundle: One or more additional CA certificates that are required for proxying HTTPS connections. When you review your information and optionally customize it before creating the cluster, you can click the YAML switch On to view the install-config.yaml file content in the panel. You can edit the YAML file with your custom settings, if you have any updates. If you are using Red Hat Advanced Cluster Management for Kubernetes and want to configure your managed cluster klusterlet to run on specific nodes, see Optional: Configuring the klusterlet to run on specific nodes for the required steps. Note: You do not have to run the oc command that is provided with the cluster details to import the cluster. When you create the cluster, it is automatically configured under the management of multicluster engine operator. Continue with Accessing your cluster for instructions for accessing your cluster. 1.6.4.6. Creating a cluster on Google Cloud Platform Follow the procedure to create a Red Hat OpenShift Container Platform cluster on Google Cloud Platform (GCP). For more information about GCP, see Google Cloud Platform . When you create a cluster, the creation process uses the OpenShift Container Platform installer with the Hive resource. If you have questions about cluster creation after completing this procedure, see Installing on GCP in the OpenShift Container Platform documentation for more information about the process. Prerequisites Creating your cluster with the console 1.6.4.6.1. Prerequisites See the following prerequisites before creating a cluster on GCP: You must have a deployed hub cluster. You must have a GCP credential. See Creating a credential for Google Cloud Platform for more information. You must have a configured domain in GCP. See Setting up a custom domain for instructions on how to configure a domain. You need your GCP login credentials, which include user name and password. You must have an OpenShift Container Platform image pull secret. See Using image pull secrets . Note: If you change your cloud provider access key on the cloud provider, you also need to manually update the corresponding credential for the cloud provider on the console of multicluster engine operator. This is required when your credentials expire on the cloud provider where the managed cluster is hosted and you try to delete the managed cluster. 1.6.4.6.2. Creating your cluster with the console To create clusters from the multicluster engine operator console, navigate to Infrastructure > Clusters . On the Clusters page, click Create cluster and complete the steps in the console. Note: This procedure is for creating a cluster. If you have an existing cluster that you want to import, see Cluster import for those steps. If you need to create a credential, see Creating a credential for Google Cloud Platform for more information. The name of your cluster is used in the hostname of the cluster. There are some restrictions that apply to naming your GCP cluster. These restrictions include not beginning the name with goog or containing a group of letters and numbers that resemble google anywhere in the name. See Bucket naming guidelines for the complete list of restrictions. Important: When you create a cluster, the controller creates a namespace for the cluster and its resources. Ensure that you include only resources for that cluster instance in that namespace. Destroying the cluster deletes the namespace and all of the resources in it. Tip: Select YAML: On to view content updates as you enter the information in the console. If you want to add your cluster to an existing cluster set, you must have the correct permissions on the cluster set to add it. If you do not have cluster-admin privileges when you are creating the cluster, you must select a cluster set on which you have clusterset-admin permissions. If you do not have the correct permissions on the specified cluster set, the cluster creation fails. Contact your cluster administrator to provide you with clusterset-admin permissions to a cluster set if you do not have any cluster set options to select. Every managed cluster must be associated with a managed cluster set. If you do not assign the managed cluster to a ManagedClusterSet , it is automatically added to the default managed cluster set. If there is already a base DNS domain that is associated with the selected credential for your GCP account, that value is populated in the field. You can change the value by overwriting it. See Setting up a custom domain for more information. This name is used in the hostname of the cluster. The release image identifies the version of the OpenShift Container Platform image that is used to create the cluster. If the version that you want to use is available, you can select the image from the list of images. If the image that you want to use is not a standard image, you can enter the URL to the image that you want to use. See Release images for more information about release images. The Node pools include the control plane pool and the worker pools. The control plane nodes share the management of the cluster activity. The information includes the following fields: Region: Specify a region where you want to run your control plane pools. A closer region might provide faster performance, but a more distant region might be more distributed. CPU architecture: If the architecture type of the managed cluster is not the same as the architecture of your hub cluster, enter a value for the instruction set architecture of the machines in the pool. Valid values are amd64 , ppc64le , s390x , and arm64 . You can specify the instance type of your control plane pool. You can change the type and size of your instance after it is created. You can create one or more worker nodes in a worker pool to run the container workloads for the cluster. They can be in a single worker pool, or distributed across multiple worker pools. If zero worker nodes are specified, the control plane nodes also function as worker nodes. The information includes the following fields: Instance type: You can change the type and size of your instance after it is created. Node count: This setting is required when you define a worker pool. The networking details are required, and multiple networks are required for using IPv6 addresses. You can add an additional network by clicking Add network . Proxy information that is provided in the credential is automatically added to the proxy fields. You can use the information as it is, overwrite it, or add the information if you want to enable a proxy. The following list contains the required information for creating a proxy: HTTP proxy: The URL that should be used as a proxy for HTTP traffic. HTTPS proxy: The secure proxy URL that should be used for HTTPS traffic. If no value is provided, the same value as the HTTP Proxy URL is used for both HTTP and HTTPS . No proxy sites: A comma-separated list of sites that should bypass the proxy. Begin a domain name with a period . to include all of the subdomains that are in that domain. Add an asterisk * to bypass the proxy for all destinations. Additional trust bundle: One or more additional CA certificates that are required for proxying HTTPS connections. When you review your information and optionally customize it before creating the cluster, you can select YAML: On to view the install-config.yaml file content in the panel. You can edit the YAML file with your custom settings, if you have any updates. If you are using Red Hat Advanced Cluster Management for Kubernetes and want to configure your managed cluster klusterlet to run on specific nodes, see Optional: Configuring the klusterlet to run on specific nodes for the required steps. Note: You do not have to run the oc command that is provided with the cluster details to import the cluster. When you create the cluster, it is automatically configured under the management of multicluster engine operator. Continue with Accessing your cluster for instructions for accessing your cluster. 1.6.4.7. Creating a cluster on VMware vSphere You can use the multicluster engine operator console to deploy a Red Hat OpenShift Container Platform cluster on VMware vSphere. When you create a cluster, the creation process uses the OpenShift Container Platform installer with the Hive resource. If you have questions about cluster creation after completing this procedure, see Installing on vSphere in the OpenShift Container Platform documentation for more information about the process. Prerequisites Creating your cluster with the console 1.6.4.7.1. Prerequisites See the following prerequisites before creating a cluster on vSphere: You must have a hub cluster that is deployed on a supported OpenShift Container Platform version. You need a vSphere credential. See Creating a credential for VMware vSphere for more information. You need an OpenShift Container Platform image pull secret. See Using image pull secrets . You must have the following information for the VMware instance where you are deploying: Required static IP addresses for API and Ingress instances DNS records for: The following API base domain must point to the static API VIP: api.<cluster_name>.<base_domain> The following application base domain must point to the static IP address for Ingress VIP: *.apps.<cluster_name>.<base_domain> 1.6.4.7.2. Creating your cluster with the console To create a cluster from the multicluster engine operator console, navigate to Infrastructure > Clusters . On the Clusters page, click Create cluster and complete the steps in the console. Note: This procedure is for creating a cluster. If you have an existing cluster that you want to import, see Cluster import for those steps. If you need to create a credential, see Creating a credential for VMware vSphere for more information about creating a credential. The name of your cluster is used in the hostname of the cluster. Important: When you create a cluster, the controller creates a namespace for the cluster and its resources. Ensure that you include only resources for that cluster instance in that namespace. Destroying the cluster deletes the namespace and all of the resources in it. Tip: Select YAML: On to view content updates as you enter the information in the console. If you want to add your cluster to an existing cluster set, you must have the correct permissions on the cluster set to add it. If you do not have cluster-admin privileges when you are creating the cluster, you must select a cluster set on which you have clusterset-admin permissions. If you do not have the correct permissions on the specified cluster set, the cluster creation fails. Contact your cluster administrator to provide you with clusterset-admin permissions to a cluster set if you do not have any cluster set options to select. Every managed cluster must be associated with a managed cluster set. If you do not assign the managed cluster to a ManagedClusterSet , it is automatically added to the default managed cluster set. If there is already a base domain associated with the selected credential that you configured for your vSphere account, that value is populated in the field. You can change the value by overwriting it. See Installing a cluster on vSphere with customizations for more information. This value must match the name that you used to create the DNS records listed in the prerequisites section. This name is used in the hostname of the cluster. The release image identifies the version of the OpenShift Container Platform image that is used to create the cluster. If the version that you want to use is available, you can select the image from the list of images. If the image that you want to use is not a standard image, you can enter the URL to the image that you want to use. See Release images for more information about release images. Note: Release images for OpenShift Container Platform versions 4.14 and later are supported. The node pools include the control plane pool and the worker pools. The control plane nodes share the management of the cluster activity. The information includes the CPU architecture field. View the following field description: CPU architecture: If the architecture type of the managed cluster is not the same as the architecture of your hub cluster, enter a value for the instruction set architecture of the machines in the pool. Valid values are amd64 , ppc64le , s390x , and arm64 . You can create one or more worker nodes in a worker pool to run the container workloads for the cluster. They can be in a single worker pool, or distributed across multiple worker pools. If zero worker nodes are specified, the control plane nodes also function as worker nodes. The information includes Cores per socket , CPUs , Memory_min MiB, _Disk size in GiB, and Node count . Networking information is required. Multiple networks are required for using IPv6. Some of the required networking information is included the following fields: vSphere network name: Specify the VMware vSphere network name. API VIP: Specify the IP address to use for internal API communication. Note: This value must match the name that you used to create the DNS records listed in the prerequisites section. If not provided, the DNS must be pre-configured so that api. resolves correctly. Ingress VIP: Specify the IP address to use for ingress traffic. Note: This value must match the name that you used to create the DNS records listed in the prerequisites section. If not provided, the DNS must be pre-configured so that test.apps. resolves correctly. You can add an additional network by clicking Add network . You must have more than one network if you are using IPv6 addresses. Proxy information that is provided in the credential is automatically added to the proxy fields. You can use the information as it is, overwrite it, or add the information if you want to enable a proxy. The following list contains the required information for creating a proxy: HTTP proxy: Specify the URL that should be used as a proxy for HTTP traffic. HTTPS proxy: Specify the secure proxy URL that should be used for HTTPS traffic. If no value is provided, the same value as the HTTP Proxy URL is used for both HTTP and HTTPS . No proxy sites: Provide a comma-separated list of sites that should bypass the proxy. Begin a domain name with a period . to include all of the subdomains that are in that domain. Add an asterisk * to bypass the proxy for all destinations. Additional trust bundle: One or more additional CA certificates that are required for proxying HTTPS connections. You can define the disconnected installation image by clicking Disconnected installation . When creating a cluster by using Red Hat OpenStack Platform provider and disconnected installation, if a certificate is required to access the mirror registry, you must enter it in the Additional trust bundle field in the Configuration for disconnected installation section when configuring your credential or the Disconnected installation section when creating a cluster. You can click Add automation template to create a template. When you review your information and optionally customize it before creating the cluster, you can click the YAML switch On to view the install-config.yaml file content in the panel. You can edit the YAML file with your custom settings, if you have any updates. If you are using Red Hat Advanced Cluster Management for Kubernetes and want to configure your managed cluster klusterlet to run on specific nodes, see Optional: Configuring the klusterlet to run on specific nodes for the required steps. Note: You do not have to run the oc command that is provided with the cluster details to import the cluster. When you create the cluster, it is automatically configured under the management of multicluster engine operator. Continue with Accessing your cluster for instructions for accessing your cluster. 1.6.4.8. Creating a cluster on Red Hat OpenStack Platform You can use the multicluster engine operator console to deploy a Red Hat OpenShift Container Platform cluster on Red Hat OpenStack Platform. When you create a cluster, the creation process uses the OpenShift Container Platform installer with the Hive resource. If you have questions about cluster creation after completing this procedure, see Installing on OpenStack in the OpenShift Container Platform documentation for more information about the process. Prerequisites Creating your cluster with the console 1.6.4.8.1. Prerequisites See the following prerequisites before creating a cluster on Red Hat OpenStack Platform: You must have a hub cluster that is deployed on OpenShift Container Platform version 4.6 or later. You must have a Red Hat OpenStack Platform credential. See Creating a credential for Red Hat OpenStack Platform for more information. You need an OpenShift Container Platform image pull secret. See Using image pull secrets . You need the following information for the Red Hat OpenStack Platform instance where you are deploying: Flavor name for the control plane and worker instances; for example, m1.xlarge Network name for the external network to provide the floating IP addresses Required floating IP addresses for API and ingress instances DNS records for: The following API base domain must point to the floating IP address for the API: api.<cluster_name>.<base_domain> The following application base domain must point to the floating IP address for ingress:app-name: *.apps.<cluster_name>.<base_domain> 1.6.4.8.2. Creating your cluster with the console To create a cluster from the multicluster engine operator console, navigate to Infrastructure > Clusters . On the Clusters page, click Create cluster and complete the steps in the console. Note: This procedure is for creating a cluster. If you have an existing cluster that you want to import, see Cluster import for those steps. If you need to create a credential, see Creating a credential for Red Hat OpenStack Platform for more information. The name of the cluster is used in the hostname of the cluster. The name must contain fewer than 15 characters. This value must match the name that you used to create the DNS records listed in the credential prerequisites section. Important: When you create a cluster, the controller creates a namespace for the cluster and its resources. Ensure that you include only resources for that cluster instance in that namespace. Destroying the cluster deletes the namespace and all of the resources in it. Tip: Select YAML: On to view content updates as you enter the information in the console. If you want to add your cluster to an existing cluster set, you must have the correct permissions on the cluster set to add it. If you do not have cluster-admin privileges when you are creating the cluster, you must select a cluster set on which you have clusterset-admin permissions. If you do not have the correct permissions on the specified cluster set, the cluster creation fails. Contact your cluster administrator to provide you with clusterset-admin permissions to a cluster set if you do not have any cluster set options to select. Every managed cluster must be associated with a managed cluster set. If you do not assign the managed cluster to a ManagedClusterSet , it is automatically added to the default managed cluster set. If there is already a base DNS domain that is associated with the selected credential that you configured for your Red Hat OpenStack Platform account, that value is populated in the field. You can change the value by overwriting it. See Managing domains in the Red Hat OpenStack Platform documentation for more information. This name is used in the hostname of the cluster. The release image identifies the version of the OpenShift Container Platform image that is used to create the cluster. If the version that you want to use is available, you can select the image from the list of images. If the image that you want to use is not a standard image, you can enter the URL to the image that you want to use. See Release images for more information about release images. Only release images for OpenShift Container Platform versions 4.6.x and higher are supported. The node pools include the control plane pool and the worker pools. The control plane nodes share the management of the cluster activity. If the architecture type of the managed cluster is not the same as the architecture of your hub cluster, enter a value for the instruction set architecture of the machines in the pool. Valid values are amd64 , ppc64le , s390x , and arm64 . You must add an instance type for your control plane pool, but you can change the type and size of your instance after it is created. You can create one or more worker nodes in a worker pool to run the container workloads for the cluster. They can be in a single worker pool, or distributed across multiple worker pools. If zero worker nodes are specified, the control plane nodes also function as worker nodes. The information includes the following fields: Instance type: You can change the type and size of your instance after it is created. Node count: Specify the node count for your worker pool. This setting is required when you define a worker pool. Networking details are required for your cluster. You must provide the values for one or more networks for an IPv4 network. For an IPv6 network, you must define more than one network. You can add an additional network by clicking Add network . You must have more than one network if you are using IPv6 addresses. Proxy information that is provided in the credential is automatically added to the proxy fields. You can use the information as it is, overwrite it, or add the information if you want to enable a proxy. The following list contains the required information for creating a proxy: HTTP proxy: Specify the URL that should be used as a proxy for HTTP traffic. HTTPS proxy: The secure proxy URL that should be used for HTTPS traffic. If no value is provided, the same value as the HTTP Proxy is used for both HTTP and HTTPS . No proxy: Define a comma-separated list of sites that should bypass the proxy. Begin a domain name with a period . to include all of the subdomains that are in that domain. Add an asterisk * to bypass the proxy for all destinations. Additional trust bundle: One or more additional CA certificates that are required for proxying HTTPS connections. You can define the disconnected installation image by clicking Disconnected installation . When creating a cluster by using Red Hat OpenStack Platform provider and disconnected installation, if a certificate is required to access the mirror registry, you must enter it in the Additional trust bundle field in the Configuration for disconnected installation section when configuring your credential or the Disconnected installation section when creating a cluster. When you review your information and optionally customize it before creating the cluster, you can click the YAML switch On to view the install-config.yaml file content in the panel. You can edit the YAML file with your custom settings, if you have any updates. When creating a cluster that uses an internal certificate authority (CA), you need to customize the YAML file for your cluster by completing the following steps: With the YAML switch on at the review step, insert a Secret object at the top of the list with the CA certificate bundle. Note: If the Red Hat OpenStack Platform environment provides services using certificates signed by multiple authorities, the bundle must include the certificates to validate all of the required endpoints. The addition for a cluster named ocp3 resembles the following example: apiVersion: v1 kind: Secret type: Opaque metadata: name: ocp3-openstack-trust namespace: ocp3 stringData: ca.crt: | -----BEGIN CERTIFICATE----- <Base64 certificate contents here> -----END CERTIFICATE----- -----BEGIN CERTIFICATE----- <Base64 certificate contents here> -----END CERTIFICATE---- Modify the Hive ClusterDeployment object to specify the value of certificatesSecretRef in spec.platform.openstack , similar to the following example: platform: openstack: certificatesSecretRef: name: ocp3-openstack-trust credentialsSecretRef: name: ocp3-openstack-creds cloud: openstack The example assumes that the cloud name in the clouds.yaml file is openstack . If you are using Red Hat Advanced Cluster Management for Kubernetes and want to configure your managed cluster klusterlet to run on specific nodes, see Optional: Configuring the klusterlet to run on specific nodes for the required steps. Note: You do not have to run the oc command that is provided with the cluster details to import the cluster. When you create the cluster, it is automatically configured under the management of multicluster engine operator. Continue with Accessing your cluster for instructions for accessing your cluster. 1.6.4.9. Creating a cluster in an on-premises environment You can use the console to create on-premises Red Hat OpenShift Container Platform clusters. The clusters can be single-node OpenShift clusters, multi-node clusters, and compact three-node clusters on VMware vSphere, Red Hat OpenStack, Nutanix, or in a bare metal environment. There is no platform integration with the platform where you install the cluster, as the platform value is set to platform=none . A single-node OpenShift cluster contains only a single node, which hosts the control plane services and the user workloads. This configuration can be helpful when you want to minimize the resource footprint of the cluster. You can also provision multiple single-node OpenShift clusters on edge resources by using the zero touch provisioning feature, which is a feature that is available with Red Hat OpenShift Container Platform. For more information about zero touch provisioning, see Clusters at the network far edge in the OpenShift Container Platform documentation. Prerequisites Creating your cluster with the console Creating your cluster with the command line 1.6.4.9.1. Prerequisites See the following prerequisites before creating a cluster in an on-premises environment: You must have a deployed hub cluster on a supported OpenShift Container Platform version. You need a configured infrastructure environment with a host inventory of configured hosts. You must have internet access for your hub cluster (connected), or a connection to an internal or mirror registry that has a connection to the internet (disconnected) to retrieve the required images for creating the cluster. You need a configured on-premises credential. You need an OpenShift Container Platform image pull secret. See Using image pull secrets . You need the following DNS records: The following API base domain must point to the static API VIP: The following application base domain must point to the static IP address for Ingress VIP: 1.6.4.9.2. Creating your cluster with the console To create a cluster from the console, complete the following steps: Navigate to Infrastructure > Clusters . On the Clusters page, click Create cluster and complete the steps in the console. Select Host inventory as the type of cluster. The following options are available for your assisted installation: Use existing discovered hosts : Select your hosts from a list of hosts that are in an existing host inventory. Discover new hosts : Discover hosts that are not already in an existing infrastructure environment. Discover your own hosts, rather than using one that is already in an infrastructure environment. If you need to create a credential, see Creating a credential for an on-premises environment for more information. The name for your cluster is used in the hostname of the cluster. Important: When you create a cluster, the controller creates a namespace for the cluster and its resources. Ensure that you include only resources for that cluster instance in that namespace. Destroying the cluster deletes the namespace and all of the resources in it. Note: Select YAML: On to view content updates as you enter the information in the console. If you want to add your cluster to an existing cluster set, you must have the correct permissions on the cluster set to add it. If you do not have cluster-admin privileges when you are creating the cluster, you must select a cluster set on which you have clusterset-admin permissions. If you do not have the correct permissions on the specified cluster set, the cluster creation fails. Contact your cluster administrator to provide you with clusterset-admin permissions to a cluster set if you do not have any cluster set options to select. Every managed cluster must be associated with a managed cluster set. If you do not assign the managed cluster to a ManagedClusterSet , it is automatically added to the default managed cluster set. If there is already a base DNS domain that is associated with the selected credential that you configured for your provider account, that value is populated in that field. You can change the value by overwriting it, but this setting cannot be changed after the cluster is created. The base domain of your provider is used to create routes to your Red Hat OpenShift Container Platform cluster components. It is configured in the DNS of your cluster provider as a Start of Authority (SOA) record. The OpenShift version identifies the version of the OpenShift Container Platform image that is used to create the cluster. If the version that you want to use is available, you can select the image from the list of images. If the image that you want to use is not a standard image, you can enter the URL to the image that you want to use. See Release images to learn more. When you select a supported OpenShift Container Platform version, an option to select Install single-node OpenShift is displayed. A single-node OpenShift cluster contains a single node which hosts the control plane services and the user workloads. See Scaling hosts to an infrastructure environment to learn more about adding nodes to a single-node OpenShift cluster after it is created. If you want your cluster to be a single-node OpenShift cluster, select the single-node OpenShift option. You can add additional workers to single-node OpenShift clusters by completing the following steps: From the console, navigate to Infrastructure > Clusters and select the name of the cluster that you created or want to access. Select Actions > Add hosts to add additional workers. Note: The single-node OpenShift control plane requires 8 CPU cores, while a control plane node for a multinode control plane cluster only requires 4 CPU cores. After you review and save the cluster, your cluster is saved as a draft cluster. You can close the creation process and finish the process later by selecting the cluster name on the Clusters page. If you are using existing hosts, select whether you want to select the hosts yourself, or if you want them to be selected automatically. The number of hosts is based on the number of nodes that you selected. For example, a single-node OpenShift cluster only requires one host, while a standard three-node cluster requires three hosts. The locations of the available hosts that meet the requirements for this cluster are displayed in the list of Host locations . For distribution of the hosts and a more high-availability configuration, select multiple locations. If you are discovering new hosts with no existing infrastructure environment, complete the steps in Adding hosts to the host inventory by using the Discovery Image . After the hosts are bound, and the validations pass, complete the networking information for your cluster by adding the following IP addresses: API VIP: Specifies the IP address to use for internal API communication. Note: This value must match the name that you used to create the DNS records listed in the prerequisites section. If not provided, the DNS must be pre-configured so that api. resolves correctly. Ingress VIP: Specifies the IP address to use for ingress traffic. Note: This value must match the name that you used to create the DNS records listed in the prerequisites section. If not provided, the DNS must be pre-configured so that test.apps. resolves correctly. If you are using Red Hat Advanced Cluster Management for Kubernetes and want to configure your managed cluster klusterlet to run on specific nodes, see Optional: Configuring the klusterlet to run on specific nodes for the required steps. You can view the status of the installation on the Clusters navigation page. Continue with Accessing your cluster for instructions for accessing your cluster. 1.6.4.9.3. Creating your cluster with the command line You can also create a cluster without the console by using the assisted installer feature within the central infrastructure management component. After you complete this procedure, you can boot the host from the discovery image that is generated. The order of the procedures is generally not important, but is noted when there is a required order. 1.6.4.9.3.1. Create the namespace You need a namespace for your resources. It is more convenient to keep all of the resources in a shared namespace. This example uses sample-namespace for the name of the namespace, but you can use any name except assisted-installer . Create a namespace by creating and applying the following file: apiVersion: v1 kind: Namespace metadata: name: sample-namespace 1.6.4.9.3.2. Add the pull secret to the namespace Add your pull secret to your namespace by creating and applying the following custom resource: apiVersion: v1 kind: Secret type: kubernetes.io/dockerconfigjson metadata: name: <pull-secret> namespace: sample-namespace stringData: .dockerconfigjson: 'your-pull-secret-json' 1 1 Add the content of the pull secret. For example, this can include a cloud.openshift.com , quay.io , or registry.redhat.io authentication. 1.6.4.9.3.3. Generate a ClusterImageSet Generate a CustomImageSet to specify the version of OpenShift Container Platform for your cluster by creating and applying the following custom resource: apiVersion: hive.openshift.io/v1 kind: ClusterImageSet metadata: name: openshift-v4.14.0 spec: releaseImage: quay.io/openshift-release-dev/ocp-release:4.14.0-rc.0-x86_64 Note: You need to create a multi-architecture ClusterImageSet if you install a managed cluster that has a different architecture than the hub cluster. To learn more, see Creating a release image to deploy a cluster on a different architecture . 1.6.4.9.3.4. Create the ClusterDeployment custom resource The ClusterDeployment custom resource definition is an API that controls the lifecycle of the cluster. It references the AgentClusterInstall custom resource in the spec.ClusterInstallRef setting which defines the cluster parameters. Create and apply a ClusterDeployment custom resource based on the following example: apiVersion: hive.openshift.io/v1 kind: ClusterDeployment metadata: name: single-node namespace: demo-worker4 spec: baseDomain: hive.example.com clusterInstallRef: group: extensions.hive.openshift.io kind: AgentClusterInstall name: test-agent-cluster-install 1 version: v1beta1 clusterName: test-cluster controlPlaneConfig: servingCertificates: {} platform: agentBareMetal: agentSelector: matchLabels: location: internal pullSecretRef: name: <pull-secret> 2 1 Use the name of your AgentClusterInstall resource. 2 Use the pull secret that you downloaded in Add the pull secret to the namespace . 1.6.4.9.3.5. Create the AgentClusterInstall custom resource In the AgentClusterInstall custom resource, you can specify many of the requirements for the clusters. For example, you can specify the cluster network settings, platform, number of control planes, and worker nodes. Create and add the a custom resource that resembles the following example: apiVersion: extensions.hive.openshift.io/v1beta1 kind: AgentClusterInstall metadata: name: test-agent-cluster-install namespace: demo-worker4 spec: platformType: BareMetal 1 clusterDeploymentRef: name: single-node 2 imageSetRef: name: openshift-v4.14.0 3 networking: clusterNetwork: - cidr: 10.128.0.0/14 hostPrefix: 23 machineNetwork: - cidr: 192.168.111.0/24 serviceNetwork: - 172.30.0.0/16 provisionRequirements: controlPlaneAgents: 1 sshPublicKey: ssh-rsa <your-public-key-here> 4 1 Specify the platform type of the environment where the cluster is created. Valid values are: BareMetal , None , VSphere , Nutanix , or External . 2 Use the same name that you used for your ClusterDeployment resource. 3 Use the ClusterImageSet that you generated in Generate a ClusterImageSet . 4 You can specify your SSH public key, which enables you to access the host after it is installed. 1.6.4.9.3.6. Optional: Create the NMStateConfig custom resource The NMStateConfig custom resource is only required if you have a host-level network configuration, such as static IP addresses. If you include this custom resource, you must complete this step before creating an InfraEnv custom resource. The NMStateConfig is referred to by the values for spec.nmStateConfigLabelSelector in the InfraEnv custom resource. Create and apply your NMStateConfig custom resource, which resembles the following example. Replace values where needed: apiVersion: agent-install.openshift.io/v1beta1 kind: NMStateConfig metadata: name: <mynmstateconfig> namespace: <demo-worker4> labels: demo-nmstate-label: <value> spec: config: interfaces: - name: eth0 type: ethernet state: up mac-address: 02:00:00:80:12:14 ipv4: enabled: true address: - ip: 192.168.111.30 prefix-length: 24 dhcp: false - name: eth1 type: ethernet state: up mac-address: 02:00:00:80:12:15 ipv4: enabled: true address: - ip: 192.168.140.30 prefix-length: 24 dhcp: false dns-resolver: config: server: - 192.168.126.1 routes: config: - destination: 0.0.0.0/0 -hop-address: 192.168.111.1 -hop-interface: eth1 table-id: 254 - destination: 0.0.0.0/0 -hop-address: 192.168.140.1 -hop-interface: eth1 table-id: 254 interfaces: - name: "eth0" macAddress: "02:00:00:80:12:14" - name: "eth1" macAddress: "02:00:00:80:12:15" Note: You must include the demo-nmstate-label label name and value in the InfraEnv resource spec.nmStateConfigLabelSelector.matchLabels field. 1.6.4.9.3.7. Create the InfraEnv custom resource The InfraEnv custom resource provides the configuration to create the discovery ISO. Within this custom resource, you identify values for proxy settings, ignition overrides, and specify NMState labels. The value of spec.nmStateConfigLabelSelector in this custom resource references the NMStateConfig custom resource. Note: If you plan to include the optional NMStateConfig custom resource, you must reference it in the InfraEnv custom resource. If you create the InfraEnv custom resource before you create the NMStateConfig custom resource edit the InfraEnv custom resource to reference the NMStateConfig custom resource and download the ISO after the reference is added. Create and apply the following custom resource: apiVersion: agent-install.openshift.io/v1beta1 kind: InfraEnv metadata: name: myinfraenv namespace: demo-worker4 spec: clusterRef: name: single-node 1 namespace: demo-worker4 2 pullSecretRef: name: pull-secret sshAuthorizedKey: <your_public_key_here> nmStateConfigLabelSelector: matchLabels: demo-nmstate-label: value proxy: httpProxy: http://USERNAME:[email protected]:PORT httpsProxy: https://USERNAME:[email protected]:PORT noProxy: .example.com,172.22.0.0/24,10.10.0.0/24 1 Replace the clusterDeployment resource name from Create the ClusterDeployment . 2 Replace the clusterDeployment resource namespace from Create the ClusterDeployment . 1.6.4.9.3.7.1. InfraEnv field table Field Optional or required Description sshAuthorizedKey Optional You can specify your SSH public key, which enables you to access the host when it is booted from the discovery ISO image. nmStateConfigLabelSelector Optional Consolidates advanced network configuration such as static IPs, bridges, and bonds for the hosts. The host network configuration is specified in one or more NMStateConfig resources with labels you choose. The nmStateConfigLabelSelector property is a Kubernetes label selector that matches your chosen labels. The network configuration for all NMStateConfig labels that match this label selector is included in the Discovery Image. When you boot, each host compares each configuration to its network interfaces and applies the appropriate configuration. proxy Optional You can specify proxy settings required by the host during discovery in the proxy section. Note: When provisioning with IPv6, you cannot define a CIDR address block in the noProxy settings. You must define each address separately. 1.6.4.9.3.8. Boot the host from the discovery image The remaining steps explain how to boot the host from the discovery ISO image that results from the procedures. Download the discovery image from the namespace by running the following command: Move the discovery image to virtual media, a USB drive, or another storage location and boot the host from the discovery image that you downloaded. The Agent resource is created automatically. It is registered to the cluster and represents a host that booted from a discovery image. Approve the Agent custom resource and start the installation by running the following command: Replace the agent name and UUID with your values. You can confirm that it was approved when the output of the command includes an entry for the target cluster that includes a value of true for the APPROVED parameter. 1.6.4.9.4. Additional resources For additional steps that are required when creating a cluster on the Nutanix platform with the CLI, see Adding hosts on Nutanix with the API and Nutanix post-installation configuration in the Red Hat OpenShift Container Platform documentation. For additional information about zero touch provisioning, see Clusters at the network far edge in the OpenShift Container Platform documentation. See Using image pull secrets See Creating a credential for an on-premises environment See Release images See Adding hosts to the host inventory by using the Discovery Image 1.6.4.10. Creating a cluster in a proxy environment You can create a Red Hat OpenShift Container Platform cluster when your hub cluster is connected through a proxy server. One of the following situations must be true for the cluster creation to succeed: multicluster engine operator has a private network connection with the managed cluster that you are creating, with managed cluster access to the Internet by using a proxy. The managed cluster is on an infrastructure provider, but the firewall ports enable communication from the managed cluster to the hub cluster. To create a cluster that is configured with a proxy, complete the following steps: Configure the cluster-wide-proxy setting on the hub cluster by adding the following information to your install-config YAML that is stored in your Secret: apiVersion: v1 kind: Proxy baseDomain: <domain> proxy: httpProxy: http://<username>:<password>@<proxy.example.com>:<port> httpsProxy: https://<username>:<password>@<proxy.example.com>:<port> noProxy: <wildcard-of-domain>,<provisioning-network/CIDR>,<BMC-address-range/CIDR> Replace username with the username for your proxy server. Replace password with the password to access your proxy server. Replace proxy.example.com with the path of your proxy server. Replace port with the communication port with the proxy server. Replace wildcard-of-domain with an entry for domains that should bypass the proxy. Replace provisioning-network/CIDR with the IP address of the provisioning network and the number of assigned IP addresses, in CIDR notation. Replace BMC-address-range/CIDR with the BMC address and the number of addresses, in CIDR notation. After you add the values, the settings are applied to your clusters. Provision the cluster by completing the procedure for creating a cluster. See Creating a cluster to select your provider. Note: You can only use install-config YAML when deploying your cluster. After deploying your cluster, any new changes you make to install-config YAML do not apply. To update the configuration after deployment, you must use policies. See Pod policy for more information. 1.6.4.10.1. Additional resources See Creating clusters to select your provider. See Pod policy to learn how to make configuration changes after deploying your cluster. See Cluster lifecycle introduction for more topics. Return to Creating a cluster in a proxy environment . 1.6.4.11. Configuring AgentClusterInstall proxy The AgentClusterInstall proxy fields determine the proxy settings during installation, and are used to create the cluster-wide proxy resource in the created cluster. 1.6.4.11.1. Configuring AgentClusterInstall To configure the AgentClusterInstall proxy, add the proxy settings to the AgentClusterInstall resource. See the following YAML sample with httpProxy , httpsProxy , and noProxy : apiVersion: extensions.hive.openshift.io/v1beta1 kind: AgentClusterInstall spec: proxy: httpProxy: http://<username>:<password>@<proxy.example.com>:<port> 1 httpsProxy: https://<username>:<password>@<proxy.example.com>:<port> 2 noProxy: <wildcard-of-domain>,<provisioning-network/CIDR>,<BMC-address-range/CIDR> 3 1 httpProxy is the URL of the proxy for HTTP requests. Replace the username and password values with your credentials for your proxy server. Replace proxy.example.com with the path of your proxy server. 2 httpsProxy is the URL of the proxy for HTTPS requests. Replace the values with your credentials. Replace port with the communication port with the proxy server. 3 noProxy is a comma-separated list of domains and CIDRs for which the proxy should not be used. Replace wildcard-of-domain with an entry for domains that should bypass the proxy. Replace provisioning-network/CIDR with the IP address of the provisioning network and the number of assigned IP addresses, in CIDR notation. Replace BMC-address-range/CIDR with the BMC address and the number of addresses, in CIDR notation. 1.6.5. Cluster import You can import clusters from different Kubernetes cloud providers. After you import, the target cluster becomes a managed cluster for the multicluster engine operator hub cluster. You can generally complete the import tasks anywhere that you can access the hub cluster and the target managed cluster, unless otherwise specified. A hub cluster cannot manage any other hub cluster, but can manage itself. The hub cluster is configured to automatically be imported and self-managed. You do not need to manually import the hub cluster. If you remove a hub cluster and try to import it again, you must add the local-cluster:true label to the ManagedCluster resource. Important: Cluster lifecycle now supports all providers that are certified through the Cloud Native Computing Foundation (CNCF) Kubernetes Conformance Program. Choose a vendor that is recognized by CNFC for your hybrid cloud multicluster management. See the following information about using CNFC providers: Learn how CNFC providers are certified at Certified Kubernetes Conformance . For Red Hat support information about CNFC third-party providers, see Red Hat support with third party components , or Contact Red Hat support . If you bring your own CNFC conformance certified cluster, you need to change the OpenShift Container Platform CLI oc command to the Kubernetes CLI command, kubectl . Read the following topics to learn more about importing a cluster so that you can manage it: Required user type or access level : Cluster administrator Importing an existing cluster by using the console Importing a managed cluster by using the CLI Importing a managed cluster by using agent registration Importing an on-premises Red Hat OpenShift Container Platform cluster 1.6.5.1. Importing a managed cluster by using the console After you install multicluster engine for Kubernetes operator, you are ready to import a cluster to manage. Continue reading the following topics learn how to import a managed cluster by using the console: Prerequisites Creating a new pull secret Importing a cluster Optional: Configuring the cluster API address Removing a cluster 1.6.5.1.1. Prerequisites A deployed hub cluster. If you are importing bare metal clusters, the hub cluster must be installed on a supported Red Hat OpenShift Container Platform version. A cluster you want to manage. The base64 command line tool. A defined multiclusterhub.spec.imagePullSecret if you are importing a cluster that was not created by OpenShift Container Platform. This secret might have been created when multicluster engine for Kubernetes operator was installed. See Custom image pull secret for more information about how to define this secret. Required user type or access level: Cluster administrator 1.6.5.1.2. Creating a new pull secret If you need to create a new pull secret, complete the following steps: Download your Kubernetes pull secret from cloud.redhat.com . Add the pull secret to the namespace of your hub cluster. Run the following command to create a new secret in the open-cluster-management namespace: Replace open-cluster-management with the name of the namespace of your hub cluster. The default namespace of the hub cluster is open-cluster-management . Replace path-to-pull-secret with the path to the pull secret that you downloaded. The secret is automatically copied to the managed cluster when it is imported. Ensure that a previously installed agent is deleted from the cluster that you want to import. You must remove the open-cluster-management-agent and open-cluster-management-agent-addon namespaces to avoid errors. For importing in a Red Hat OpenShift Dedicated environment, see the following notes: You must have the hub cluster deployed in a Red Hat OpenShift Dedicated environment. The default permission in Red Hat OpenShift Dedicated is dedicated-admin, but that does not contain all of the permissions to create a namespace. You must have cluster-admin permissions to import and manage a cluster with multicluster engine operator. 1.6.5.1.3. Importing a cluster You can import existing clusters from the console for each of the available cloud providers. Note: A hub cluster cannot manage a different hub cluster. A hub cluster is set up to automatically import and manage itself, so you do not have to manually import a hub cluster to manage itself. By default, the namespace is used for the cluster name and namespace, but you can change it. Important: When you create a cluster, the controller creates a namespace for the cluster and its resources. Ensure that you include only resources for that cluster instance in that namespace. Destroying the cluster deletes the namespace and all of the resources in it. Every managed cluster must be associated with a managed cluster set. If you do not assign the managed cluster to a ManagedClusterSet , the cluster is automatically added to the default managed cluster set. If you want to add the cluster to a different cluster set, you must have clusterset-admin privileges to the cluster set. If you do not have cluster-admin privileges when you are importing the cluster, you must select a cluster set on which you have clusterset-admin permissions. If you do not have the correct permissions on the specified cluster set, the cluster importing fails. Contact your cluster administrator to provide you with clusterset-admin permissions to a cluster set if you do not have cluster set options to select. If you import a OpenShift Container Platform Dedicated cluster and do not specify a vendor by adding a label for vendor=OpenShiftDedicated , or if you add a label for vendor=auto-detect , a managed-by=platform label is automatically added to the cluster. You can use this added label to identify the cluster as a OpenShift Container Platform Dedicated cluster and retrieve the OpenShift Container Platform Dedicated clusters as a group. The following table provides the available options for import mode , which specifies the method for importing the cluster: Run import commands manually After completing and submitting the information in the console, including any Red Hat Ansible Automation Platform templates, run the provided command on the target cluster to import the cluster. Enter your server URL and API token for the existing cluster Provide the server URL and API token of the cluster that you are importing. You can specify a Red Hat Ansible Automation Platform template to run when the cluster is upgraded. Provide the kubeconfig file Copy and paste the contents of the kubeconfig file of the cluster that you are importing. You can specify a Red Hat Ansible Automation Platform template to run when the cluster is upgraded. Note: You must have the Red Hat Ansible Automation Platform Resource Operator installed from OperatorHub to create and run an Ansible Automation Platform job. To configure a cluster API address, see Optional: Configuring the cluster API address . To configure your managed cluster klusterlet to run on specific nodes, see Optional: Configuring the klusterlet to run on specific nodes . 1.6.5.1.3.1. Optional: Configuring the cluster API address Complete the following steps to optionally configure the Cluster API address that is on the cluster details page by configuring the URL that is displayed in the table when you run the oc get managedcluster command: Log in to your hub cluster with an ID that has cluster-admin permissions. Configure a kubeconfig file for your targeted managed cluster. Edit the managed cluster entry for the cluster that you are importing by running the following command, replacing cluster-name with the name of the managed cluster: Add the ManagedClusterClientConfigs section to the ManagedCluster spec in the YAML file, as shown in the following example: spec: hubAcceptsClient: true managedClusterClientConfigs: - url: <https://api.new-managed.dev.redhat.com> 1 1 Replace the value of the URL with the URL that provides external access to the managed cluster that you are importing. 1.6.5.1.3.2. Optional: Configuring the klusterlet to run on specific nodes You can specify which nodes you want the managed cluster klusterlet to run on by configuring the nodeSelector and tolerations annotation for the managed cluster. Complete the following steps to configure these settings: Select the managed cluster that you want to update from the clusters page in the console. Set the YAML switch to On to view the YAML content. Note: The YAML editor is only available when importing or creating a cluster. To edit the managed cluster YAML definition after importing or creating, you must use the OpenShift Container Platform command-line interface or the Red Hat Advanced Cluster Management search feature. Add the nodeSelector annotation to the managed cluster YAML definition. The key for this annotation is: open-cluster-management/nodeSelector . The value of this annotation is a string map with JSON formatting. Add the tolerations entry to the managed cluster YAML definition. The key of this annotation is: open-cluster-management/tolerations . The value of this annotation represents a toleration list with JSON formatting. The resulting YAML might resemble the following example: apiVersion: cluster.open-cluster-management.io/v1 kind: ManagedCluster metadata: annotations: open-cluster-management/nodeSelector: '{"dedicated":"acm"}' open-cluster-management/tolerations: '[{"key":"dedicated","operator":"Equal","value":"acm","effect":"NoSchedule"}]' You can also use a KlusterletConfig to configure the nodeSelector and tolerations for the managed cluster. Complete the following steps to configure these settings: Note: If you use a KlusterletConfig , the managed cluster uses the configuration in the KlusterletConfig settings instead of the settings in the managed cluster annotation. Apply the following sample YAML content. Replace value where needed: apiVersion: config.open-cluster-management.io/v1alpha1 kind: KlusterletConfig metadata: name: <klusterletconfigName> spec: nodePlacement: nodeSelector: dedicated: acm tolerations: - key: dedicated operator: Equal value: acm effect: NoSchedule Add the agent.open-cluster-management.io/klusterlet-config: `<klusterletconfigName> annotation to the managed cluster, replacing <klusterletconfigName> with the name of your KlusterletConfig . 1.6.5.1.4. Removing an imported cluster Complete the following procedure to remove an imported cluster and the open-cluster-management-agent-addon that was created on the managed cluster. On the Clusters page, click Actions > Detach cluster to remove your cluster from management. Note: If you attempt to detach the hub cluster, which is named local-cluster , be aware that the default setting of disableHubSelfManagement is false . This setting causes the hub cluster to reimport itself and manage itself when it is detached and it reconciles the MultiClusterHub controller. It might take hours for the hub cluster to complete the detachment process and reimport. If you want to reimport the hub cluster without waiting for the processes to finish, you can run the following command to restart the multiclusterhub-operator pod and reimport faster: You can change the value of the hub cluster to not import automatically by changing the disableHubSelfManagement value to true . For more information, see the disableHubSelfManagement topic. 1.6.5.1.4.1. Additional resources See Custom image pull secret for more information about how to define a custom image pull secret. See the disableHubSelfManagement topic. 1.6.5.2. Importing a managed cluster by using the CLI After you install multicluster engine for Kubernetes operator, you are ready to import a cluster and manage it by using the Red Hat OpenShift Container Platform CLI. Continue reading the following topics to learn how to import a managed cluster with the CLI by using the auto import secret, or by using manual commands. Prerequisites Supported architectures Preparing cluster import Importing a cluster by using the auto import secret Importing a cluster manually Importing the klusterlet add-on Removing an imported cluster by using the CLI Important: A hub cluster cannot manage a different hub cluster. A hub cluster is set up to automatically import and manage itself as a local cluster . You do not have to manually import a hub cluster to manage itself. If you remove a hub cluster and try to import it again, you need to add the local-cluster:true label. 1.6.5.2.1. Prerequisites A deployed hub cluster. If you are importing bare metal clusters, the hub cluster must be installed on a supported OpenShift Container Platform version. A separate cluster you want to manage. The OpenShift Container Platform CLI. See Getting started with the OpenShift CLI for information about installing and configuring the OpenShift Container Platform CLI. A defined multiclusterhub.spec.imagePullSecret if you are importing a cluster that was not created by OpenShift Container Platform. This secret might have been created when multicluster engine for Kubernetes operator was installed. See Custom image pull secret for more information about how to define this secret. 1.6.5.2.2. Supported architectures Linux (x86_64, s390x, ppc64le) macOS 1.6.5.2.3. Preparing for cluster import Before importing a managed cluster by using the CLI, you must complete the following steps: Log in to your hub cluster by running the following command: Run the following command on the hub cluster to create the project and namespace. The cluster name that is defined in <cluster_name> is also used as the cluster namespace in the YAML file and commands: Important: The cluster.open-cluster-management.io/managedCluster label is automatically added to and removed from a managed cluster namespace. Do not manually add it to or remove it from a managed cluster namespace. Create a file named managed-cluster.yaml with the following example content: apiVersion: cluster.open-cluster-management.io/v1 kind: ManagedCluster metadata: name: <cluster_name> labels: cloud: auto-detect vendor: auto-detect spec: hubAcceptsClient: true When the values for cloud and vendor are set to auto-detect , Red Hat Advanced Cluster Management detects the cloud and vendor types automatically from the cluster that you are importing. You can optionally replace the values for auto-detect with with the cloud and vendor values for your cluster. See the following example: cloud: Amazon vendor: OpenShift Apply the YAML file to the ManagedCluster resource by running the following command: You can now continue with either Importing the cluster by using the auto import secret or Importing the cluster manually . 1.6.5.2.4. Importing a cluster by using the auto import secret To import a managed cluster by using the auto import secret, you must create a secret that contains either a reference to the kubeconfig file of the cluster, or the kube API server and token pair of the cluster. Complete the following steps to import a cluster by using the auto import secret: Retrieve the kubeconfig file, or the kube API server and token, of the managed cluster that you want to import. See the documentation for your Kubernetes cluster to learn where to locate your kubeconfig file or your kube API server and token. Create the auto-import-secret.yaml file in the USD{CLUSTER_NAME} namespace. Create a YAML file named auto-import-secret.yaml by using content that is similar to the following template: apiVersion: v1 kind: Secret metadata: name: auto-import-secret namespace: <cluster_name> stringData: autoImportRetry: "5" # If you are using the kubeconfig file, add the following value for the kubeconfig file # that has the current context set to the cluster to import: kubeconfig: |- <kubeconfig_file> # If you are using the token/server pair, add the following two values instead of # the kubeconfig file: token: <Token to access the cluster> server: <cluster_api_url> type: Opaque Apply the YAML file in the <cluster_name> namespace by running the following command: Note: By default, the auto import secret is used one time and deleted when the import process completes. If you want to keep the auto import secret, add managedcluster-import-controller.open-cluster-management.io/keeping-auto-import-secret to the secret. You can add it by running the following command: Validate the JOINED and AVAILABLE status for your imported cluster. Run the following command from the hub cluster: Log in to the managed cluster by running the following command on the cluster: You can validate the pod status on the cluster that you are importing by running the following command: You can now continue with Importing the klusterlet add-on . 1.6.5.2.5. Importing a cluster manually Important: The import command contains pull secret information that is copied to each of the imported managed clusters. Anyone who can access the imported clusters can also view the pull secret information. Complete the following steps to import a managed cluster manually: Obtain the klusterlet-crd.yaml file that was generated by the import controller on your hub cluster by running the following command: Obtain the import.yaml file that was generated by the import controller on your hub cluster by running the following command: Proceed with the following steps in the cluster that you are importing: Log in to the managed cluster that you are importing by entering the following command: Apply the klusterlet-crd.yaml that you generated in step 1 by running the following command: Apply the import.yaml file that you previously generated by running the following command: You can validate the JOINED and AVAILABLE status for the managed cluster that you are importing by running the following command from the hub cluster: You can now continue with Importing the klusterlet add-on . 1.6.5.2.6. Importing the klusterlet add-on Implement the KlusterletAddonConfig klusterlet add-on configuration to enable other add-ons on your managed clusters. Create and apply the configuration file by completing the following steps: Create a YAML file that is similar to the following example: apiVersion: agent.open-cluster-management.io/v1 kind: KlusterletAddonConfig metadata: name: <cluster_name> namespace: <cluster_name> spec: applicationManager: enabled: true certPolicyController: enabled: true policyController: enabled: true searchCollector: enabled: true Save the file as klusterlet-addon-config.yaml . Apply the YAML by running the following command: Add-ons are installed after the managed cluster status you are importing is AVAILABLE . You can validate the pod status of add-ons on the cluster you are importing by running the following command: 1.6.5.2.7. Removing an imported cluster by using the command line interface To remove a managed cluster by using the command line interface, run the following command: Replace <cluster_name> with the name of the cluster. 1.6.5.3. Importing a managed cluster by using agent registration After you install multicluster engine for Kubernetes operator, you are ready to import a cluster and manage it by using the agent registration endpoint. Continue reading the following topics to learn how to import a managed cluster by using the agent registration endpoint. Prerequisites Supported architectures Importing a cluster 1.6.5.3.1. Prerequisites A deployed hub cluster. If you are importing bare metal clusters, the hub cluster must be installed on a supported OpenShift Container Platform version. A cluster you want to manage. The base64 command line tool. A defined multiclusterhub.spec.imagePullSecret if you are importing a cluster that was not created by OpenShift Container Platform. This secret might have been created when multicluster engine for Kubernetes operator was installed. See Custom image pull secret for more information about how to define this secret. If you need to create a new secret, see Creating a new pull secret . 1.6.5.3.2. Supported architectures Linux (x86_64, s390x, ppc64le) macOS 1.6.5.3.3. Importing a cluster To import a managed cluster by using the agent registration endpoint, complete the following steps: Get the agent registration server URL by running the following command on the hub cluster: Note: If your hub cluster is using a cluster-wide-proxy, make sure that you are using the URL that managed cluster can access. Get the cacert by running the following command: Note: If you are not using the kube-root-ca issued endpoint, use the public agent-registration API endpoint CA instead of the kube-root-ca CA. Get the token for the agent registration sever to authorize by applying the following YAML content: apiVersion: v1 kind: ServiceAccount metadata: name: managed-cluster-import-agent-registration-sa namespace: multicluster-engine --- apiVersion: v1 kind: Secret type: kubernetes.io/service-account-token metadata: name: managed-cluster-import-agent-registration-sa-token namespace: multicluster-engine annotations: kubernetes.io/service-account.name: "managed-cluster-import-agent-registration-sa" --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: managedcluster-import-controller-agent-registration-client rules: - nonResourceURLs: ["/agent-registration/*"] verbs: ["get"] --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: name: managed-cluster-import-agent-registration roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: managedcluster-import-controller-agent-registration-client subjects: - kind: ServiceAccount name: managed-cluster-import-agent-registration-sa namespace: multicluster-engine Run the following command to export the token: Enable the automatic approval and patch the content to cluster-manager by running the following command: Note: You can also disable automatic approval and manually approve certificate signing requests from managed clusters. Switch to your managed cluster and get the cacert by running the following command: Run the following command to import the managed cluster to the hub cluster: Replace <clusterName> with the name of you cluster. Optional: Replace <klusterletconfigName> with the name of your KlusterletConfig. 1.6.5.4. Importing an on-premises Red Hat OpenShift Container Platform cluster manually After you install multicluster engine for Kubernetes operator, you are ready to import a cluster to manage. You can import an existing OpenShift Container Platform cluster so that you can add additional nodes. Your hub cluster is automatically imported when you install multicluster engine operator, so you can add nodes to your hub cluster without completing the following procedure. Continue reading the following topics to learn more: Prerequisites Importing a cluster 1.6.5.4.1. Prerequisites Enable the central infrastructure management service. 1.6.5.4.2. Importing a cluster Complete the following steps to import an OpenShift Container Platform cluster manually, without a static network or a bare metal host, and prepare it for adding nodes: Create a namespace for the OpenShift Container Platform cluster that you want to import by applying the following YAML content: apiVersion: v1 kind: Namespace metadata: name: managed-cluster Make sure that a ClusterImageSet matching the OpenShift Container Platform cluster you are importing exists by applying the following YAML content: apiVersion: hive.openshift.io/v1 kind: ClusterImageSet metadata: name: openshift-v4.11.18 spec: releaseImage: quay.io/openshift-release-dev/ocp-release@sha256:22e149142517dfccb47be828f012659b1ccf71d26620e6f62468c264a7ce7863 Add your pull secret to access the image by applying the following YAML content: apiVersion: v1 kind: Secret type: kubernetes.io/dockerconfigjson metadata: name: pull-secret namespace: managed-cluster stringData: .dockerconfigjson: <pull-secret-json> 1 1 Replace <pull-secret-json> with your pull secret JSON. Copy the kubeconfig from your OpenShift Container Platform cluster to the hub cluster. Get the kubeconfig from your OpenShift Container Platform cluster by running the following command. Make sure that kubeconfig is set as the cluster being imported: Note: If your cluster API is accessed through a custom domain, you must first edit this kubeconfig by adding your custom certificates in the certificate-authority-data field and by changing the server field to match your custom domain. Copy the kubeconfig to the hub cluster by running the following command. Make sure that kubeconfig is set as your hub cluster: Create an AgentClusterInstall custom resource by applying the following YAML content. Replace values where needed: apiVersion: extensions.hive.openshift.io/v1beta1 kind: AgentClusterInstall metadata: name: <your-cluster-name> 1 namespace: <managed-cluster> spec: networking: userManagedNetworking: true clusterDeploymentRef: name: <your-cluster> imageSetRef: name: openshift-v4.11.18 provisionRequirements: controlPlaneAgents: 2 sshPublicKey: <""> 3 1 Choose a name for your cluster. 2 Use 1 if you are using a single-node OpenShift cluster. Use 3 if you are using a multinode cluster. 3 Add the optional sshPublicKey field to log in to nodes for troubleshooting. Create a ClusterDeployment by applying the following YAML content. Replace values where needed: apiVersion: hive.openshift.io/v1 kind: ClusterDeployment metadata: name: <your-cluster-name> 1 namespace: managed-cluster spec: baseDomain: <redhat.com> 2 installed: <true> 3 clusterMetadata: adminKubeconfigSecretRef: name: <your-cluster-name-admin-kubeconfig> 4 clusterID: <""> 5 infraID: <""> 6 clusterInstallRef: group: extensions.hive.openshift.io kind: AgentClusterInstall name: your-cluster-name-install version: v1beta1 clusterName: your-cluster-name platform: agentBareMetal: pullSecretRef: name: pull-secret 1 Choose a name for your cluster. 2 Make sure baseDomain matches the domain you are using for your OpenShift Container Platform cluster. 3 Set to true to automatically import your OpenShift Container Platform cluster as a production environment cluster. 4 Reference the kubeconfig you created in step 4. 5 6 Leave clusterID and infraID empty in production environments. Add an InfraEnv custom resource to discover new hosts to add to your cluster by applying the following YAML content. Replace values where needed: Note: The following example might require additional configuration if you are not using a static IP address. apiVersion: agent-install.openshift.io/v1beta1 kind: InfraEnv metadata: name: your-infraenv namespace: managed-cluster spec: clusterRef: name: your-cluster-name namespace: managed-cluster pullSecretRef: name: pull-secret sshAuthorizedKey: "" Table 1.7. InfraEnv field table Field Optional or required Description clusterRef Optional The clusterRef field is optional if you are using late binding. If you are not using late binding, you must add the clusterRef . sshAuthorizedKey Optional Add the optional sshAuthorizedKey field to log in to nodes for troubleshooting. If the import is successful, a URL to download an ISO file appears. Download the ISO file by running the following command, replacing <url> with the URL that appears: Note: You can automate host discovery by using bare metal host. Optional: If you want to use Red Hat Advanced Cluster Management features, such as policies, on your OpenShift Container Platform cluster, create a ManagedCluster resource. Make sure that the name of your ManagedCluster resource matches the name of your ClusterDeplpoyment resource. If you are missing the ManagedCluster resource, your cluster status is detached in the console. 1.6.5.5. Specifying image registry on managed clusters for import You might need to override the image registry on the managed clusters that you are importing. You can do this by creating a ManagedClusterImageRegistry custom resource definition. The ManagedClusterImageRegistry custom resource definition is a namespace-scoped resource. The ManagedClusterImageRegistry custom resource definition specifies a set of managed clusters for a Placement to select, but needs different images from the custom image registry. After the managed clusters are updated with the new images, the following label is added to each managed cluster for identification: open-cluster-management.io/image-registry=<namespace>.<managedClusterImageRegistryName> . The following example shows a ManagedClusterImageRegistry custom resource definition: apiVersion: imageregistry.open-cluster-management.io/v1alpha1 kind: ManagedClusterImageRegistry metadata: name: <imageRegistryName> namespace: <namespace> spec: placementRef: group: cluster.open-cluster-management.io resource: placements name: <placementName> 1 pullSecret: name: <pullSecretName> 2 registries: 3 - mirror: <mirrored-image-registry-address> source: <image-registry-address> - mirror: <mirrored-image-registry-address> source: <image-registry-address> 1 Replace with the name of a Placement in the same namespace that selects a set of managed clusters. 2 Replace with the name of the pull secret that is used to pull images from the custom image registry. 3 List the values for each of the source and mirror registries. Replace the mirrored-image-registry-address and image-registry-address with the value for each of the mirror and source values of the registries. Example 1: To replace the source image registry named registry.redhat.io/rhacm2 with localhost:5000/rhacm2 , and registry.redhat.io/multicluster-engine with localhost:5000/multicluster-engine , use the following example: registries: - mirror: localhost:5000/rhacm2/ source: registry.redhat.io/rhacm2 - mirror: localhost:5000/multicluster-engine source: registry.redhat.io/multicluster-engine Example 2: To replace the source image, registry.redhat.io/rhacm2/registration-rhel8-operator with localhost:5000/rhacm2-registration-rhel8-operator , use the following example: registries: - mirror: localhost:5000/rhacm2-registration-rhel8-operator source: registry.redhat.io/rhacm2/registration-rhel8-operator Important: If you are importing a managed cluster by using agent registration, you must create a KlusterletConfig that contains image registries. See the following example. Replace values where needed: apiVersion: config.open-cluster-management.io/v1alpha1 kind: KlusterletConfig metadata: name: <klusterletconfigName> spec: pullSecret: namespace: <pullSecretNamespace> name: <pullSecretName> registries: - mirror: <mirrored-image-registry-address> source: <image-registry-address> - mirror: <mirrored-image-registry-address> source: <image-registry-address> See Importing a managed cluster by using the agent registration endpoint to learn more. 1.6.5.5.1. Importing a cluster that has a ManagedClusterImageRegistry Complete the following steps to import a cluster that is customized with a ManagedClusterImageRegistry custom resource definition: Create a pull secret in the namespace where you want your cluster to be imported. For these steps, the namespace is myNamespace . Create a Placement in the namespace that you created. apiVersion: cluster.open-cluster-management.io/v1beta1 kind: Placement metadata: name: myPlacement namespace: myNamespace spec: clusterSets: - myClusterSet tolerations: - key: "cluster.open-cluster-management.io/unreachable" operator: Exists Note: The unreachable toleration is required for the Placement to be able to select the cluster. Create a ManagedClusterSet resource and bind it to your namespace. apiVersion: cluster.open-cluster-management.io/v1beta2 kind: ManagedClusterSet metadata: name: myClusterSet --- apiVersion: cluster.open-cluster-management.io/v1beta2 kind: ManagedClusterSetBinding metadata: name: myClusterSet namespace: myNamespace spec: clusterSet: myClusterSet Create the ManagedClusterImageRegistry custom resource definition in your namespace. apiVersion: imageregistry.open-cluster-management.io/v1alpha1 kind: ManagedClusterImageRegistry metadata: name: myImageRegistry namespace: myNamespace spec: placementRef: group: cluster.open-cluster-management.io resource: placements name: myPlacement pullSecret: name: myPullSecret registry: myRegistryAddress Import a managed cluster from the console and add it to a managed cluster set. Copy and run the import commands on the managed cluster after the open-cluster-management.io/image-registry=myNamespace.myImageRegistry label is added to the managed cluster. 1.6.6. Accessing your cluster To access an Red Hat OpenShift Container Platform cluster that was created and is managed, complete the following steps: From the console, navigate to Infrastructure > Clusters and select the name of the cluster that you created or want to access. Select Reveal credentials to view the user name and password for the cluster. Note these values to use when you log in to the cluster. Note: The Reveal credentials option is not available for imported clusters. Select Console URL to link to the cluster. Log in to the cluster by using the user ID and password that you found in step three. 1.6.7. Scaling managed clusters For clusters that you created, you can customize and resize your managed cluster specifications, such as virtual machine sizes and number of nodes. See the following option if you are using installer-provisioned infrastructure for cluster deployment: Scaling with MachinePool See the following options if you are using central infrastructure management for cluster deployment: Adding worker nodes to OpenShift Container Platform clusters Adding control plane nodes to managed clusters 1.6.7.1. Scaling with MachinePool For clusters that you provision with multicluster engine operator, a MachinePool resource is automatically created for you. You can further customize and resize your managed cluster specifications, such as virtual machine sizes and number of nodes, by using MachinePool . Using the MachinePool resource is not supported for bare metal clusters. A MachinePool resource is a Kubernetes resource on the hub cluster that groups the MachineSet resources together on the managed cluster. The MachinePool resource uniformly configures a set of machine resources, including zone configurations, instance type, and root storage. With MachinePool , you can manually configure the desired number of nodes or configure autoscaling of nodes on the managed cluster. 1.6.7.1.1. Configure autoscaling Configuring autoscaling provides the flexibility of your cluster to scale as needed to lower your cost of resources by scaling down when traffic is low, and by scaling up to ensure that there are enough resources when there is a higher demand for resources. To enable autoscaling on your MachinePool resources using the console, complete the following steps: In the navigation, select Infrastructure > Clusters . Click the name of your target cluster and select the Machine pools tab. From the machine pools page, select Enable autoscale from the Options menu for the target machine pool. Select the minimum and maximum number of machine set replicas. A machine set replica maps directly to a node on the cluster. The changes might take several minutes to reflect on the console after you click Scale . You can view the status of the scaling operation by clicking View machines in the notification of the Machine pools tab. To enable autoscaling on your MachinePool resources using the command line, complete the following steps: Enter the following command to view your list of machine pools, replacing managed-cluster-namespace with the namespace of your target managed cluster. Enter the following command to edit the YAML file for the machine pool: Replace MachinePool-resource-name with the name of your MachinePool resource. Replace managed-cluster-namespace with the name of the namespace of your managed cluster. Delete the spec.replicas field from the YAML file. Add the spec.autoscaling.minReplicas setting and spec.autoscaling.maxReplicas fields to the resource YAML. Add the minimum number of replicas to the minReplicas setting. Add the maximum number of replicas into the maxReplicas setting. Save the file to submit the changes. 1.6.7.1.2. Disabling autoscaling You can disable autoscaling by using the console or the command line. To disable autoscaling by using the console, complete the following steps: In the navigation, select Infrastructure > Clusters . Click the name of your target cluster and select the Machine pools tab. From the machine pools page, select Disable autoscale from the Options menu for the target machine pool. Select the number of machine set replicas that you want. A machine set replica maps directly with a node on the cluster. It might take several minutes to display in the console after you click Scale . You can view the status of the scaling by clicking View machines in the notification on the Machine pools tab. To disable autoscaling by using the command line, complete the following steps: Enter the following command to view your list of machine pools: Replace managed-cluster-namespace with the namespace of your target managed cluster. Enter the following command to edit the YAML file for the machine pool: Replace name-of-MachinePool-resource with the name of your MachinePool resource. Replace namespace-of-managed-cluster with the name of the namespace of your managed cluster. Delete the spec.autoscaling field from the YAML file. Add the spec.replicas field to the resource YAML. Add the number of replicas to the replicas setting. Save the file to submit the changes. 1.6.7.1.3. Enabling manual scaling You can scale manually from the console and from the command line. 1.6.7.1.3.1. Enabling manual scaling with the console To scale your MachinePool resources using the console, complete the following steps: Disable autoscaling for your MachinePool if it is enabled. See the steps. From the console, click Infrastructure > Clusters . Click the name of your target cluster and select the Machine pools tab. From the machine pools page, select Scale machine pool from the Options menu for the targeted machine pool. Select the number of machine set replicas that you want. A machine set replica maps directly with a node on the cluster. Changes might take several minutes to reflect on the console after you click Scale . You can view the status of the scaling operation by clicking View machines from the notification of the Machine pools tab. 1.6.7.1.3.2. Enabling manual scaling with the command line To scale your MachinePool resources by using the command line, complete the following steps: Enter the following command to view your list of machine pools, replacing <managed-cluster-namespace> with the namespace of your target managed cluster namespace: Enter the following command to edit the YAML file for the machine pool: Replace MachinePool-resource-name with the name of your MachinePool resource. Replace managed-cluster-namespace with the name of the namespace of your managed cluster. Delete the spec.autoscaling field from the YAML file. Modify the spec.replicas field in the YAML file with the number of replicas you want. Save the file to submit the changes. 1.6.7.2. Adding worker nodes to OpenShift Container Platform clusters If you are using central infrastructure management, you can customize your OpenShift Container Platform clusters by adding additional production environment nodes. Required access: Administrator Prerequisite Creating a valid kubeconfig Adding worker nodes 1.6.7.2.1. Prerequisite You must have the new CA certificates required to trust the managed cluster API. 1.6.7.2.2. Creating a valid kubeconfig Before adding production environment worker nodes to OpenShift Container Platform clusters, you must check if you have a valid kubeconfig . If the API certificates in your managed cluster changed, complete the following steps to update the kubeconfig with new CA certificates: Check if the kubeconfig for your clusterDeployment is valid by running the following commands. Replace <kubeconfig_name> with the name of your current kubeconfig and replace <cluster_name> with the name of your cluster: export <kubeconfig_name>=USD(oc get cd USD<cluster_name> -o "jsonpath={.spec.clusterMetadata.adminKubeconfigSecretRef.name}") oc extract secret/USD<kubeconfig_name> --keys=kubeconfig --to=- > original-kubeconfig oc --kubeconfig=original-kubeconfig get node If you receive the following error message, you must update your kubeconfig secret. If you receive no error message, continue to Adding worker nodes : Get the base64 encoded certificate bundle from your kubeconfig certificate-authority-data field and decode it by running the following command: echo <base64 encoded blob> | base64 --decode > decoded-existing-certs.pem Create an updated kubeconfig file by copying your original file. Run the following command and replace <new_kubeconfig_name> with the name of your new kubeconfig file: cp original-kubeconfig <new_kubeconfig_name> Append new certificates to the decoded pem by running the following command: cat decoded-existing-certs.pem new-ca-certificate.pem | openssl base64 -A Add the base64 output from the command as the value of the certificate-authority-data key in your new kubeconfig file by using a text editor. Check if the new kubeconfig is valid by querying the API with the new kubeconfig . Run the following command. Replace <new_kubeconfig_name> with the name of your new kubeconfig file: KUBECONFIG=<new_kubeconfig_name> oc get nodes If you receive a successful output, the kubeconfig is valid. Update the kubeconfig secret in the Red Hat Advanced Cluster Management hub cluster by running the following command. Replace <new_kubeconfig_name> with the name of your new kubeconfig file: oc patch secret USDoriginal-kubeconfig --type='json' -p="[{'op': 'replace', 'path': '/data/kubeconfig', 'value': 'USD(openssl base64 -A -in <new_kubeconfig_name>)'},{'op': 'replace', 'path': '/data/raw-kubeconfig', 'value': 'USD(openssl base64 -A -in <new_kubeconfig_name>)'}]" 1.6.7.2.3. Adding worker nodes If you have a valid kubeconfig , complete the following steps to add production environment worker nodes to OpenShift Container Platform clusters: Boot the machine that you want to use as a worker node from the ISO you previously downloaded. Note: Make sure that the worker node meets the requirements for an OpenShift Container Platform worker node. Wait for an agent to register after running the following command: watch -n 5 "oc get agent -n managed-cluster" If the agent registration is succesful, an agent is listed. Approve the agent for installation. This can take a few minutes. Note: If the agent is not listed, exit the watch command by pressing Ctrl and C, then log in to the worker node to troubleshoot. If you are using late binding, run the following command to associate pending unbound agents with your OpenShift Container Platform cluster. Skip to step 5 if you are not using late binding: oc get agent -n managed-cluster -ojson | jq -r '.items[] | select(.spec.approved==false) |select(.spec.clusterDeploymentName==null) | .metadata.name'| xargs oc -n managed-cluster patch -p '{"spec":{"clusterDeploymentName":{"name":"some-other-cluster","namespace":"managed-cluster"}}}' --type merge agent Approve any pending agents for installation by running the following command: oc get agent -n managed-cluster -ojson | jq -r '.items[] | select(.spec.approved==false) | .metadata.name'| xargs oc -n managed-cluster patch -p '{"spec":{"approved":true}}' --type merge agent Wait for the installation of the worker node. When the worker node installation is complete, the worker node contacts the managed cluster with a Certificate Signing Request (CSR) to start the joining process. The CSR is automatically signed. 1.6.7.3. Adding control plane nodes to managed clusters You can replace a failing control plane by adding control plane nodes to healthy or unhealthy managed clusters. Required access: Administrator 1.6.7.3.1. Adding control plane nodes to healthy managed clusters Complete the following steps to add control plane nodes to healthy managed clusters: Complete the steps in Adding worker nodes to OpenShift Container Platform clusters for your the new control plane node. If you are using the Discovery ISO to add a node, set the agent to master before you approve the agent. Run the following command: oc patch agent <AGENT-NAME> -p '{"spec":{"role": "master"}}' --type=merge Note: CSRs are not automatically approved. If you are using a BareMetalHost to add a node, add the following line to your BareMetalHost annotations when creating the BareMetalHost resource: bmac.agent-install.openshift.io/role: master Follow the steps in Installing a primary control plane node on a healthy cluster in the Assisted Installer for OpenShift Container Platform documentation 1.6.7.3.2. Adding control plane nodes to unhealthy managed clusters Complete the following steps to add control plane nodes to unhealthy managed clusters: Remove the agent for unhealthy control plane nodes. If you used the zero-touch provisioning flow for deployment, remove the bare metal host. Complete the steps in Adding worker nodes to OpenShift Container Platform clusters for your the new control plane node. Set the agent to master before you approve the agent by running the following command: oc patch agent <AGENT-NAME> -p '{"spec":{"role": "master"}}' --type=merge Note: CSRs are not automatically approved. Follow the steps in Installing a primary control plane node on an unhealthy cluster in the Assisted Installer for OpenShift Container Platform documentation 1.6.8. Hibernating a created cluster You can hibernate a cluster that was created using multicluster engine operator to conserve resources. A hibernating cluster requires significantly fewer resources than one that is running, so you can potentially lower your provider costs by moving clusters in and out of a hibernating state. This feature only applies to clusters that were created by multicluster engine operator in the following environments: Amazon Web Services Microsoft Azure Google Cloud Platform 1.6.8.1. Hibernate a cluster by using the console To use the console to hibernate a cluster that was created by multicluster engine operator, complete the following steps: From the navigation menu, select Infrastructure > Clusters . Ensure that the Manage clusters tab is selected. Select Hibernate cluster from the Options menu for the cluster. Note: If the Hibernate cluster option is not available, you cannot hibernate the cluster. This can happen when the cluster is imported, and not created by multicluster engine operator. The status for the cluster on the Clusters page is Hibernating when the process completes. Tip: You can hibernate multiple clusters by selecting the clusters that you want to hibernate on the Clusters page, and selecting Actions > Hibernate clusters . Your selected cluster is hibernating. 1.6.8.2. Hibernate a cluster by using the CLI To use the CLI to hibernate a cluster that was created by multicluster engine operator, complete the following steps: Enter the following command to edit the settings for the cluster that you want to hibernate: Replace name-of-cluster with the name of the cluster that you want to hibernate. Replace namespace-of-cluster with the namespace of the cluster that you want to hibernate. Change the value for spec.powerState to Hibernating . Enter the following command to view the status of the cluster: Replace name-of-cluster with the name of the cluster that you want to hibernate. Replace namespace-of-cluster with the namespace of the cluster that you want to hibernate. When the process of hibernating the cluster is complete, the value of the type for the cluster is type=Hibernating . Your selected cluster is hibernating. 1.6.8.3. Resuming normal operation of a hibernating cluster by using the console To resume normal operation of a hibernating cluster by using the console, complete the following steps: From the navigation menu, select Infrastructure > Clusters . Ensure that the Manage clusters tab is selected. Select Resume cluster from the Options menu for the cluster that you want to resume. The status for the cluster on the Clusters page is Ready when the process completes. Tip: You can resume multiple clusters by selecting the clusters that you want to resume on the Clusters page, and selecting Actions > Resume clusters . Your selected cluster is resuming normal operation. 1.6.8.4. Resuming normal operation of a hibernating cluster by using the CLI To resume normal operation of a hibernating cluster by using the CLI, complete the following steps: Enter the following command to edit the settings for the cluster: Replace name-of-cluster with the name of the cluster that you want to hibernate. Replace namespace-of-cluster with the namespace of the cluster that you want to hibernate. Change the value for spec.powerState to Running . Enter the following command to view the status of the cluster: Replace name-of-cluster with the name of the cluster that you want to hibernate. Replace namespace-of-cluster with the namespace of the cluster that you want to hibernate. When the process of resuming the cluster is complete, the value of the type for the cluster is type=Running . Your selected cluster is resuming normal operation. 1.6.9. Upgrading your cluster After you create Red Hat OpenShift Container Platform clusters that you want to manage with multicluster engine operator, you can use the multicluster engine operator console to upgrade those clusters to the latest minor version that is available in the version channel that the managed cluster uses. In a connected environment, the updates are automatically identified with notifications provided for each cluster that requires an upgrade in the console. 1.6.9.1. Prerequisites Verify that you meet all of the prerequisites for upgrading to that version. You must update the version channel on the managed cluster before you can upgrade the cluster with the console. Note: After you update the version channel on the managed cluster, the multicluster engine operator console displays the latest versions that are available for the upgrade. Your OpenShift Container Platform managed clusters must be in a Ready state. Important: You cannot upgrade Red Hat OpenShift Kubernetes Service managed clusters or OpenShift Container Platform managed clusters on Red Hat OpenShift Dedicated by using the multicluster engine operator console. 1.6.9.2. Upgrading your cluster in a connected environment To upgrade your cluster in a connected environment, complete the following steps: From the navigation menu, go to Infrastructure > Clusters . If an upgrade is available, it appears in the Distribution version column. Select the clusters in Ready state that you want to upgrade. You can only upgrade OpenShift Container Platform clusters in the console. Select Upgrade . Select the new version of each cluster. Select Upgrade . If your cluster upgrade fails, the Operator generally retries the upgrade a few times, stops, and reports the status of the failing component. In some cases, the upgrade process continues to cycle through attempts to complete the process. Rolling your cluster back to a version following a failed upgrade is not supported. Contact Red Hat support for assistance if your cluster upgrade fails. 1.6.9.3. Selecting a channel You can use the console to select a channel for your cluster upgrades on OpenShift Container Platform. After selecting a channel, you are automatically reminded of cluster upgrades that are available for both Errata versions and release versions. To select a channel for your cluster, complete the following steps: From the navigation, select Infrastructure > Clusters . Select the name of the cluster that you want to change to view the Cluster details page. If a different channel is available for the cluster, an edit icon is displayed in the Channel field. Click the Edit icon to change the setting in the field. Select a channel in the New channel field. You can find the reminders for the available channel updates in the Cluster details page of the cluster. 1.6.9.4. Upgrading a disconnected cluster You can use {cincinnati} with multicluster engine operator to upgrade clusters in a disconnected environment. In some cases, security concerns prevent clusters from being connected directly to the internet. This makes it difficult to know when upgrades are available, and how to process those upgrades. Configuring {cincinnati-short} can help. {cincinnati-short} is a separate operator and operand that monitors the available versions of your managed clusters in a disconnected environment, and makes them available for upgrading your clusters in a disconnected environment. After you configure {cincinnati-short}, it can perform the following actions: Monitor when upgrades are available for your disconnected clusters. Identify which updates are mirrored to your local site for upgrading by using the graph data file. Notify you that an upgrade is available for your cluster by using the console. The following topics explain the procedure for upgrading a disconnected cluster: Prerequisites Prepare your disconnected mirror registry Deploy the operator for {cincinnati-short} Build the graph data init container Configure certificate for the mirrored registry Deploy the {cincinnati-short} instance Override the default registry (optional) Deploy a disconnected catalog source Change the managed cluster parameter Viewing available upgrades Selecting a channel Upgrading the cluster 1.6.9.4.1. Prerequisites You must have the following prerequisites before you can use {cincinnati-short} to upgrade your disconnected clusters: A deployed hub cluster that is running on a supported OpenShift Container Platform version with restricted OLM configured. See Using Operator Lifecycle Manager on restricted networks for details about how to configure restricted OLM. Note: Make a note of the catalog source image when you configure restricted OLM. An OpenShift Container Platform cluster that is managed by the hub cluster Access credentials to a local repository where you can mirror the cluster images. See Disconnected installation mirroring for more information about how to create this repository. Note: The image for the current version of the cluster that you upgrade must always be available as one of the mirrored images. If an upgrade fails, the cluster reverts back to the version of the cluster at the time that the upgrade was attempted. 1.6.9.4.2. Prepare your disconnected mirror registry You must mirror both the image that you want to upgrade to and the current image that you are upgrading from to your local mirror registry. Complete the following steps to mirror the images: Create a script file that contains content that resembles the following example: 1 Replace /path/to/pull/secret with the path to your OpenShift Container Platform pull secret. Run the script to mirror the images, configure settings, and separate the release images from the release content. You can use the output of the last line of this script when you create your ImageContentSourcePolicy . 1.6.9.4.3. Deploy the operator for {cincinnati-short} To deploy the operator for {cincinnati-short} in your OpenShift Container Platform environment, complete the following steps: On the hub cluster, access the OpenShift Container Platform operator hub. Deploy the operator by selecting {cincinnati} Operator . Update the default values, if necessary. The deployment of the operator creates a new project named openshift-cincinnati . Wait for the installation of the operator to finish. You can check the status of the installation by entering the oc get pods command on your OpenShift Container Platform command line. Verify that the operator is in the running state. 1.6.9.4.4. Build the graph data init container {cincinnati-short} uses graph data information to determine the available upgrades. In a connected environment, {cincinnati-short} pulls the graph data information for available upgrades directly from the Cincinnati graph data GitHub repository . Because you are configuring a disconnected environment, you must make the graph data available in a local repository by using an init container . Complete the following steps to create a graph data init container : Clone the graph data Git repository by entering the following command: Create a file that contains the information for your graph data init . You can find this sample Dockerfile in the cincinnati-operator GitHub repository. The contents of the file is shown in the following sample: In this example: 1 The FROM value is the external registry where {cincinnati-short} finds the images. 2 3 The RUN commands create the directory and package the upgrade files. 4 The CMD command copies the package file to the local repository and extracts the files for an upgrade. Run the following commands to build the graph data init container : 1 Replace path_to_Dockerfile with the path to the file that you created in the step. 2 Replace USD{DISCONNECTED_REGISTRY}/cincinnati/cincinnati-graph-data-container with the path to your local graph data init container. 3 Replace /path/to/pull_secret with the path to your pull secret file. Note: You can also replace podman in the commands with docker , if you don't have podman installed. 1.6.9.4.5. Configure certificate for the mirrored registry If you are using a secure external container registry to store your mirrored OpenShift Container Platform release images, {cincinnati-short} requires access to this registry to build an upgrade graph. Complete the following steps to configure your CA certificate to work with the {cincinnati-short} pod: Find the OpenShift Container Platform external registry API, which is located in image.config.openshift.io . This is where the external registry CA certificate is stored. See Configuring additional trust stores for image registry access in the OpenShift Container Platform documentation for more information. Create a ConfigMap in the openshift-config namespace. Add your CA certificate under the key updateservice-registry . {cincinnati-short} uses this setting to locate your certificate: apiVersion: v1 kind: ConfigMap metadata: name: trusted-ca data: updateservice-registry: | -----BEGIN CERTIFICATE----- ... -----END CERTIFICATE----- Edit the cluster resource in the image.config.openshift.io API to set the additionalTrustedCA field to the name of the ConfigMap that you created. Replace trusted-ca with the path to your new ConfigMap. The {cincinnati-short} Operator watches the image.config.openshift.io API and the ConfigMap you created in the openshift-config namespace for changes, then restart the deployment if the CA cert has changed. 1.6.9.4.6. Deploy the {cincinnati-short} instance When you finish deploying the {cincinnati-short} instance on your hub cluster, this instance is located where the images for the cluster upgrades are mirrored and made available to the disconnected managed cluster. Complete the following steps to deploy the instance: If you do not want to use the default namespace of the operator, which is openshift-cincinnati , create a namespace for your {cincinnati-short} instance: In the OpenShift Container Platform hub cluster console navigation menu, select Administration > Namespaces . Select Create Namespace . Add the name of your namespace, and any other information for your namespace. Select Create to create the namespace. In the Installed Operators section of the OpenShift Container Platform console, select {cincinnati} Operator . Select Create Instance in the menu. Paste the contents from your {cincinnati-short} instance. Your YAML instance might resemble the following manifest: apiVersion: cincinnati.openshift.io/v1beta2 kind: Cincinnati metadata: name: openshift-update-service-instance namespace: openshift-cincinnati spec: registry: <registry_host_name>:<port> 1 replicas: 1 repository: USD{LOCAL_REGISTRY}/ocp4/release graphDataImage: '<host_name>:<port>/cincinnati-graph-data-container' 2 1 Replace the spec.registry value with the path to your local disconnected registry for your images. 2 Replace the spec.graphDataImage value with the path to your graph data init container. This is the same value that you used when you ran the podman push command to push your graph data init container. Select Create to create the instance. From the hub cluster CLI, enter the oc get pods command to view the status of the instance creation. It might take a while, but the process is complete when the result of the command shows that the instance and the operator are running. 1.6.9.4.7. Override the default registry (optional) Note: The steps in this section only apply if you have mirrored your releases into your mirrored registry. OpenShift Container Platform has a default image registry value that specifies where it finds the upgrade packages. In a disconnected environment, you can create an override to replace that value with the path to your local image registry where you mirrored your release images. Complete the following steps to override the default registry: Create a YAML file named mirror.yaml that resembles the following content: apiVersion: operator.openshift.io/v1alpha1 kind: ImageContentSourcePolicy metadata: name: <your-local-mirror-name> 1 spec: repositoryDigestMirrors: - mirrors: - <your-registry> 2 source: registry.redhat.io 1 Replace your-local-mirror-name with the name of your local mirror. 2 Replace your-registry with the path to your local mirror repository. Note: You can find your path to your local mirror by entering the oc adm release mirror command. Using the command line of the managed cluster, run the following command to override the default registry: 1.6.9.4.8. Deploy a disconnected catalog source On the managed cluster, disable all of the default catalog sources and create a new one. Complete the following steps to change the default location from a connected location to your disconnected local registry: Create a YAML file named source.yaml that resembles the following content: apiVersion: config.openshift.io/v1 kind: OperatorHub metadata: name: cluster spec: disableAllDefaultSources: true --- apiVersion: operators.coreos.com/v1alpha1 kind: CatalogSource metadata: name: my-operator-catalog namespace: openshift-marketplace spec: sourceType: grpc image: '<registry_host_name>:<port>/olm/redhat-operators:v1' 1 displayName: My Operator Catalog publisher: grpc 1 Replace the value of spec.image with the path to your local restricted catalog source image. On the command line of the managed cluster, change the catalog source by running the following command: 1.6.9.4.9. Change the managed cluster parameter Update the ClusterVersion resource information on the managed cluster to change the default location from where it retrieves its upgrades. From the managed cluster, confirm that the ClusterVersion upstream parameter is currently the default public {cincinnati-short} operand by entering the following command: The returned content might resemble the following content with 4.x set as the supported version: apiVersion: v1 items: - apiVersion: config.openshift.io/v1 kind: ClusterVersion [..] spec: channel: stable-4.x upstream: https://api.openshift.com/api/upgrades_info/v1/graph From the hub cluster, identify the route URL to the {cincinnati-short} operand by entering the following command: Note the returned value for later steps. On the command line of the managed cluster, edit the ClusterVersion resource by entering the following command: Replace the value of spec.channel with your new version. Replace the value of spec.upstream with the path to your hub cluster {cincinnati-short} operand. You can complete the following steps to determine the path to your operand: Run the following command on the hub cluster: Find the path to cincinnati . The path the operand is the value in the HOST/PORT field. On the command line of the managed cluster, confirm that the upstream parameter in the ClusterVersion is updated with the local hub cluster {cincinnati-short} URL by entering the following command: The results resemble the following content: apiVersion: v1 items: - apiVersion: config.openshift.io/v1 kind: ClusterVersion [..] spec: channel: stable-4.x upstream: https://<hub-cincinnati-uri>/api/upgrades_info/v1/graph 1.6.9.4.10. Viewing available upgrades On the Clusters page, the Distribution version of the cluster indicates that there is an upgrade available, if there is an upgrade in the disconnected registry. You can view the available upgrades by selecting the cluster and selecting Upgrade clusters from the Actions menu. If the optional upgrade paths are available, the available upgrades are listed. Note: No available upgrade versions are shown if the current version is not mirrored into the local image repository. 1.6.9.4.11. Selecting a channel You can use the console to select a channel for your cluster upgrades on OpenShift Container Platform version 4.6 or later. Those versions must be available on the mirror registry. Complete the steps in Selecting a channel to specify a channel for your upgrades. 1.6.9.4.12. Upgrading the cluster After you configure the disconnected registry, multicluster engine operator and {cincinnati-short} use the disconnected registry to determine if upgrades are available. If no available upgrades are displayed, make sure that you have the release image of the current level of the cluster and at least one later level mirrored in the local repository. If the release image for the current version of the cluster is not available, no upgrades are available. On the Clusters page, the Distribution version of the cluster indicates that there is an upgrade available, if there is an upgrade in the disconnected registry. You can upgrade the image by clicking Upgrade available and selecting the version for the upgrade. The managed cluster is updated to the selected version. If your cluster upgrade fails, the Operator generally retries the upgrade a few times, stops, and reports the status of the failing component. In some cases, the upgrade process continues to cycle through attempts to complete the process. Rolling your cluster back to a version following a failed upgrade is not supported. Contact Red Hat support for assistance if your cluster upgrade fails. 1.6.10. Using cluster proxy add-ons In some environments, a managed cluster is behind a firewall and cannot be accessed directly by the hub cluster. To gain access, you can set up a proxy add-on to access the kube-apiserver of the managed cluster to provide a more secure connection. Important: There must not be a cluster-wide proxy configuration on your hub cluster. Required access: Editor To configure a cluster proxy add-on for a hub cluster and a managed cluster, complete the following steps: Configure the kubeconfig file to access the managed cluster kube-apiserver by completing the following steps: Provide a valid access token for the managed cluster. Note: : You can use the corresponding token of the service account. You can also use the default service account that is in the default namespace. Export the kubeconfig file of the managed cluster by running the following command: Add a role to your service account that allows it to access pods by running the following commands: Run the following command to locate the secret of the service account token: Replace default-token with the name of your secret. Run the following command to copy the token: Replace default-token with the name of your secret. Configure the kubeconfig file on the Red Hat Advanced Cluster Management hub cluster. Export the current kubeconfig file on the hub cluster by running the following command: Modify the server file with your editor. This example uses commands when using sed . Run alias sed=gsed , if you are using OSX. Delete the original user credentials by entering the following commands: Add the token of the service account: List all of the pods on the target namespace of the target managed cluster by running the following command: Replace the default namespace with the namespace that you want to use. Access other services on the managed cluster. This feature is available when the managed cluster is a Red Hat OpenShift Container Platform cluster. The service must use service-serving-certificate to generate server certificates: From the managed cluster, use the following service account token: From the hub cluster, convert the certificate authority to a file by running the following command: Get Prometheus metrics of the managed cluster by using the following commands: 1.6.10.1. Configuring proxy settings for cluster proxy add-ons You can configure the proxy settings for cluster proxy add-ons to allow a managed cluster to communicate with the hub cluster through a HTTP and HTTPS proxy server. You might need to configure the proxy settings if the cluster proxy add-on agent requires access to the hub cluster through the proxy server. To configure the proxy settings for the cluster proxy add-on, complete the following steps: Create an AddOnDeploymentConfig resource on your hub cluster and add the spec.proxyConfig parameter. See the following example: apiVersion: addon.open-cluster-management.io/v1alpha1 kind: AddOnDeploymentConfig metadata: name: <name> 1 namespace: <namespace> 2 spec: agentInstallNamespace: open-cluster-managment-agent-addon proxyConfig: httpsProxy: "http://<username>:<password>@<ip>:<port>" 3 noProxy: ".cluster.local,.svc,172.30.0.1" 4 caBundle: <value> 5 1 Add your add-on deployment config name. 2 Add your managed cluster name. 3 Specify either a HTTP proxy or a HTTPS proxy. 4 Add the IP address of the kube-apiserver . To get the IP address, run following command on your managed cluster: oc -n default describe svc kubernetes | grep IP: 5 If you specify a HTTPS proxy in the httpsProxy field, set the proxy server CA bundle. Update the ManagedClusterAddOn resource by referencing the AddOnDeploymentConfig resource that you created. See the following example: apiVersion: addon.open-cluster-management.io/v1alpha1 kind: ManagedClusterAddOn metadata: name: cluster-proxy namespace: <namespace> 1 spec: installNamespace: open-cluster-managment-addon configs: group: addon.open-cluster-management.io resource: AddonDeploymentConfig name: <name> 2 namespace: <namespace> 3 1 Add your managed cluster name. 2 Add your add-on deployment config name. 3 Add your managed cluster name. Verify the proxy settings by checking if the cluster proxy agent pod in the open-cluster-managment-addon namespace has HTTPS_PROXY or NO_PROXY environment variables on the managed cluster. 1.6.11. Configuring Ansible Automation Platform tasks to run on managed clusters multicluster engine operator is integrated with Red Hat Ansible Automation Platform so that you can create prehook and posthook Ansible job instances that occur before or after creating or upgrading your clusters. Configuring prehook and posthook jobs for cluster destroy, and cluster scale actions are not supported. Required access: Cluster administrator Prerequisites Configuring an Automation template to run on a cluster by using the console Creating an Automation template Viewing the status of an Ansible job Pushing custom labels from the ClusterCurator resource to the automation job pod Using the ClusterCurator for Extended Update Support (EUS) upgrades 1.6.11.1. Prerequisites You must meet the following prerequisites to run Automation templates on your clusters: Install OpenShift Container Platform. Install the Ansible Automation Platform Resource Operator to connect Ansible jobs to the lifecycle of Git subscriptions. For best results when using the Automation template to launch Ansible Automation Platform jobs, the Ansible Automation Platform job template should be idempotent when it is run. You can find the Ansible Automation Platform Resource Operator in the OpenShift Container Platform OperatorHub . 1.6.11.2. Configuring an Automation template to run on a cluster by using the console You can specify the Automation template that you want to use for a cluster when you create the cluster, when you import the cluster, or after you create the cluster. To specify the template when creating or importing a cluster, select the Ansible template that you want to apply to the cluster in the Automation step. If there are no Automation templates, click Add automation template to create one. To specify the template after creating a cluster, click Update automation template in the action menu of an existing cluster. You can also use the Update automation template option to update an existing automation template. 1.6.11.3. Creating an Automation template To initiate an Ansible job with a cluster installation or upgrade, you must create an Automation template to specify when you want the jobs to run. They can be configured to run before or after the cluster installs or upgrades. To specify the details about running the Ansible template while creating a template, complete the steps in the console: Select Infrastructure > Automation from the navigation. Select the applicable path for your situation: If you want to create a new template, click Create Ansible template and continue with step 3. If you want to modify an existing template, click Edit template from the Options menu of the template that you want to modify and continue with step 5. Enter a unique name for your template, which contains lowercase alphanumeric characters or a hyphen (-). Select the credential that you want to use for the new template. After you select a credential, you can select an Ansible inventory to use for all the jobs. To link an Ansible credential to an Ansible template, complete the following steps: From the navigation, select Automation . Any template in the list of templates that is not linked to a credential contains a Link to credential icon that you can use to link the template to an existing credential. Only the credentials in the same namespace as the template are displayed. If there are no credentials that you can select, or if you do not want to use an existing credential, select Edit template from the Options menu for the template that you want to link. Click Add credential and complete the procedure in Creating a credential for Ansible Automation Platform if you have to create your credential. After you create your credential in the same namespace as the template, select the credential in the Ansible Automation Platform credential field when you edit the template. If you want to initiate any Ansible jobs before the cluster is installed, select Add an Automation template in the Pre-install Automation templates section. Select between a Job template or a Workflow job template in the modal that appears. You can also add job_tags , skip_tags , and workflow types. Use the Extra variables field to pass data to the AnsibleJob resource in the form of key=value pairs. Special keys cluster_deployment and install_config are passed automatically as extra variables. They contain general information about the cluster and details about the cluster installation configuration. Select the name of the prehook and posthook Ansible jobs to add to the installation or upgrade of the cluster. Drag the Ansible jobs to change the order, if necessary. Repeat steps 5 - 7 for any Automation templates that you want to initiate after the cluster is installed in the Post-install Automation templates section, the Pre-upgrade Automation templates section, and the Post-upgrade Automation templates section. When upgrading a cluster, you can use the Extra variables field to pass data to the AnsibleJob resource in the form of key=value pairs. In addition to the cluster_deployment and install_config special keys, the cluster_info special key is also passed automatically as an extra variable containing data from the ManagedClusterInfo resource. Your Ansible template is configured to run on clusters that specify this template when the designated actions occur. 1.6.11.4. Viewing the status of an Ansible job You can view the status of a running Ansible job to ensure that it started, and is running successfully. To view the current status of a running Ansible job, complete the following steps: In the menu, select Infrastructure > Clusters to access the Clusters page. Select the name of the cluster to view its details. View the status of the last run of the Ansible job on the cluster information. The entry shows one of the following statuses: When an install prehook or posthook job fails, the cluster status shows Failed . When an upgrade prehook or posthook job fails, a warning is displayed in the Distribution field that the upgrade failed. 1.6.11.5. Running a failed Ansible job again You can retry an upgrade from the Clusters page if the cluster prehook or posthook failed. To save time, you can also run only the failed Ansible posthooks that are part of cluster automation templates. Complete the following steps to run only the posthooks again, without retrying the entire upgrade: Add the following content to the root of the ClusterCurator resource to run the install posthook again: operation: retryPosthook: installPosthook Add the following content to the root of the ClusterCurator resource to run the upgrade posthook again: operation: retryPosthook: upgradePosthook After adding the content, a new job is created to run the Ansible posthook. 1.6.11.6. Specifying an Ansible inventory to use for all jobs You can use the ClusterCurator resource to specify an Ansible inventory to use for all jobs. See the following example. Replace channel and desiredUpdate with the correct values for your ClusterCurator : apiVersion: cluster.open-cluster-management.io/v1beta1 kind: ClusterCurator metadata: name: test-inno namespace: test-inno spec: desiredCuration: upgrade destroy: {} install: {} scale: {} upgrade: channel: stable-4.x desiredUpdate: 4.x.1 monitorTimeout: 150 posthook: - extra_vars: {} clusterName: test-inno type: post_check name: ACM Upgrade Checks prehook: - extra_vars: {} clusterName: test-inno type: pre_check name: ACM Upgrade Checks towerAuthSecret: awx inventory: Demo Inventory Note: To use the example resource, the inventory must already exist in Ansible. You can verify that the inventory is created by checking the list of available Ansible inventories from the console. 1.6.11.7. Pushing custom labels from the ClusterCurator resource to the automation job pod You can use the ClusterCurator resource to push custom labels to the automation job pod created by the Cluster Curator. You can push the custom labels on all curation types. See the following example: apiVersion: cluster.open-cluster-management.io/v1beta1 kind: ClusterCurator metadata: name: cluster1 {{{} namespace: cluster1 labels: test1: test1 test2: test2 {}}}spec: desiredCuration: install install: jobMonitorTimeout: 5 posthook: - extra_vars: {} name: Demo Job Template type: Job prehook: - extra_vars: {} name: Demo Job Template type: Job towerAuthSecret: toweraccess 1.6.11.8. Using the ClusterCurator for Extended Update Support (EUS) upgrades You can use the ClusterCurator resource to perform an easier, automatic upgrade between EUS releases. Add spec.upgrade.intermediateUpdate to the ClusterCurator resource with the intermediate release value. See the following sample, where the intermediate release is 4.13.x , and the desiredUpdate is 4.14.x : spec: desiredCuration: upgrade upgrade: intermediateUpdate: 4.13.x desiredUpdate: 4.14.x monitorTimeout: 120 Optional: You can pause the machineconfigpools to skip the intermediate release for faster upgrade. Enter Unpause machinepool in the posthook job, and pause machinepool in the prehook job. See the following example: posthook: - extra_vars: {} name: Unpause machinepool type: Job prehook: - extra_vars: {} name: Pause machinepool type: Job See the following full example of the ClusterCurator that is configured to upgrade EUS to EUS: apiVersion: cluster.open-cluster-management.io/v1beta1 kind: ClusterCurator metadata: annotations: cluster.open-cluster-management.io/upgrade-clusterversion-backoff-limit: "10" name: your-name namespace: your-namespace spec: desiredCuration: upgrade upgrade: intermediateUpdate: 4.13.x desiredUpdate: 4.14.x monitorTimeout: 120 posthook: - extra_vars: {} name: Unpause machinepool type: Job prehook: - extra_vars: {} name: Pause machinepool type: Job 1.6.12. Configuring Ansible Automation Platform jobs to run on hosted clusters Red Hat Ansible Automation Platform is integrated with multicluster engine operator so that you can create prehook and posthook Ansible Automation Platform job instances that occur before or after you create or update hosted clusters. Required access: Cluster administrator Prerequisites Running an Ansible Automation Platform job to install a hosted cluster Running an Ansible Automation Platform job to update a hosted cluster Running an Ansible Automation Platform job to delete a hosted cluster 1.6.12.1. Prerequisites You must meet the following prerequisites to run Automation templates on your clusters: A supported version of OpenShift Container Platform Install the Ansible Automation Platform Resource Operator to connect Ansible Automation Platform jobs to the lifecycle of Git subscriptions. When you use the Automation template to start Ansible Automation Platform jobs, ensure that the Ansible Automation Platform job template is idempotent when it is run. You can find the Ansible Automation Platform Resource Operator in the OpenShift Container Platform OperatorHub . 1.6.12.2. Running an Ansible Automation Platform job to install a hosted cluster To start an Ansible Automation Platform job that installs a hosted cluster, complete the following steps: Create the HostedCluster and NodePool resources, including the pausedUntil: true field. If you use the hcp create cluster command line interface command, you can specify the --pausedUntil: true flag. See the following examples: apiVersion: hypershift.openshift.io/v1beta1 kind: HostedCluster metadata: name: my-cluster namespace: clusters spec: pausedUntil: 'true' ... apiVersion: hypershift.openshift.io/v1beta1 kind: NodePool metadata: name: my-cluster-us-east-2 namespace: clusters spec: pausedUntil: 'true' ... Create a ClusterCurator resource with the same name as the HostedCluster resource and in the same namespace as the HostedCluster resource. See the following example: apiVersion: cluster.open-cluster-management.io/v1beta1 kind: ClusterCurator metadata: name: my-cluster namespace: clusters labels: open-cluster-management: curator spec: desiredCuration: install install: jobMonitorTimeout: 5 prehook: - name: Demo Job Template extra_vars: variable1: something-interesting variable2: 2 - name: Demo Job Template posthook: - name: Demo Job Template towerAuthSecret: toweraccess If your Ansible Automation Platform Tower requires authentication, create a secret resource. See the following example: apiVersion: v1 kind: Secret metadata: name: toweraccess namespace: clusters stringData: host: https://my-tower-domain.io token: ANSIBLE_TOKEN_FOR_admin 1.6.12.3. Running an Ansible Automation Platform job to update a hosted cluster To run an Ansible Automation Platform job that updates a hosted cluster, edit the ClusterCurator resource of the hosted cluster that you want to update. See the following example: apiVersion: cluster.open-cluster-management.io/v1beta1 kind: ClusterCurator metadata: name: my-cluster namespace: clusters labels: open-cluster-management: curator spec: desiredCuration: upgrade upgrade: desiredUpdate: 4.14.1 1 monitorTimeout: 120 prehook: - name: Demo Job Template extra_vars: variable1: something-interesting variable2: 2 - name: Demo Job Template posthook: - name: Demo Job Template towerAuthSecret: toweraccess 1 For details about supported versions, see Hosted control planes . Note: When you update a hosted cluster in this way, you update both the hosted control plane and the node pools to the same version. Updating the hosted control planes and node pools to different versions is not supported. 1.6.12.4. Running an Ansible Automation Platform job to delete a hosted cluster To run an Ansible Automation Platform job that deletes a hosted cluster, edit the ClusterCurator resource of the hosted cluster that you want to delete. See the following example: apiVersion: cluster.open-cluster-management.io/v1beta1 kind: ClusterCurator metadata: name: my-cluster namespace: clusters labels: open-cluster-management: curator spec: desiredCuration: destroy destroy: jobMonitorTimeout: 5 prehook: - name: Demo Job Template extra_vars: variable1: something-interesting variable2: 2 - name: Demo Job Template posthook: - name: Demo Job Template towerAuthSecret: toweraccess Note: Deleting a hosted cluster on AWS is not supported. 1.6.12.5. Additional resources For more information about the hosted control plane command line interface, hcp , see Installing the hosted control plane command line interface . For more information about hosted clusters, including supported versions, see Hosted control planes . 1.6.13. ClusterClaims A ClusterClaim is a cluster-scoped custom resource definition (CRD) on a managed cluster. A ClusterClaim represents a piece of information that a managed cluster claims. You can use the ClusterClaim to determine the Placement of the resource on the target clusters. The following example shows a ClusterClaim that is identified in the YAML file: apiVersion: cluster.open-cluster-management.io/v1alpha1 kind: ClusterClaim metadata: name: id.openshift.io spec: value: 95f91f25-d7a2-4fc3-9237-2ef633d8451c The following table shows the defined ClusterClaim list for a cluster that multicluster engine operator manages: Claim name Reserved Mutable Description id.k8s.io true false ClusterID defined in upstream proposal kubeversion.open-cluster-management.io true true Kubernetes version platform.open-cluster-management.io true false Platform the managed cluster is running on, such as AWS, GCE, and Equinix Metal product.open-cluster-management.io true false Product name, such as OpenShift, Anthos, EKS and GKE id.openshift.io false false OpenShift Container Platform external ID, which is only available for an OpenShift Container Platform cluster consoleurl.openshift.io false true URL of the management console, which is only available for an OpenShift Container Platform cluster version.openshift.io false true OpenShift Container Platform version, which is only available for an OpenShift Container Platform cluster If any of the claims are deleted or updated on managed cluster, they are restored or rolled back to a version automatically. After the managed cluster joins the hub, any ClusterClaim that is created on a managed cluster is synchronized with the status of the ManagedCluster resource on the hub cluster. See the following example of clusterClaims for a ManagedCluster , replacing 4.x with a supported version of OpenShift Container Platform: apiVersion: cluster.open-cluster-management.io/v1 kind: ManagedCluster metadata: labels: cloud: Amazon clusterID: 95f91f25-d7a2-4fc3-9237-2ef633d8451c installer.name: multiclusterhub installer.namespace: open-cluster-management name: cluster1 vendor: OpenShift name: cluster1 spec: hubAcceptsClient: true leaseDurationSeconds: 60 status: allocatable: cpu: '15' memory: 65257Mi capacity: cpu: '18' memory: 72001Mi clusterClaims: - name: id.k8s.io value: cluster1 - name: kubeversion.open-cluster-management.io value: v1.18.3+6c42de8 - name: platform.open-cluster-management.io value: AWS - name: product.open-cluster-management.io value: OpenShift - name: id.openshift.io value: 95f91f25-d7a2-4fc3-9237-2ef633d8451c - name: consoleurl.openshift.io value: 'https://console-openshift-console.apps.xxxx.dev04.red-chesterfield.com' - name: version.openshift.io value: '4.x' conditions: - lastTransitionTime: '2020-10-26T07:08:49Z' message: Accepted by hub cluster admin reason: HubClusterAdminAccepted status: 'True' type: HubAcceptedManagedCluster - lastTransitionTime: '2020-10-26T07:09:18Z' message: Managed cluster joined reason: ManagedClusterJoined status: 'True' type: ManagedClusterJoined - lastTransitionTime: '2020-10-30T07:20:20Z' message: Managed cluster is available reason: ManagedClusterAvailable status: 'True' type: ManagedClusterConditionAvailable version: kubernetes: v1.18.3+6c42de8 1.6.13.1. Create custom ClusterClaims You can create a ClusterClaim resource with a custom name on a managed cluster, which makes it easier to identify. The custom ClusterClaim resource is synchronized with the status of the ManagedCluster resource on the hub cluster. The following content shows an example of a definition for a customized ClusterClaim resource: apiVersion: cluster.open-cluster-management.io/v1alpha1 kind: ClusterClaim metadata: name: <custom_claim_name> spec: value: <custom_claim_value> The length of spec.value field must be 1024 or less. The create permission on resource clusterclaims.cluster.open-cluster-management.io is required to create a ClusterClaim resource. 1.6.13.2. List existing ClusterClaims You can use the kubectl command to list the ClusterClaims that apply to your managed cluster so that you can compare your ClusterClaim to an error message. Note: Make sure you have list permission on resource clusterclaims.cluster.open-cluster-management.io . Run the following command to list all existing ClusterClaims that are on the managed cluster: 1.6.14. ManagedClusterSets A ManagedClusterSet is a group of managed clusters. A managed cluster set, can help you manage access to all of your managed clusters. You can also create a ManagedClusterSetBinding resource to bind a ManagedClusterSet resource to a namespace. Each cluster must be a member of a managed cluster set. When you install the hub cluster, a ManagedClusterSet resource is created called default . All clusters that are not assigned to a managed cluster set are automatically assigned to the default managed cluster set. You cannot delete or update the default managed cluster set. Continue reading to learn more about how to create and manage managed cluster sets: Creating a ManagedClusterSet Assigning RBAC permissions to ManagedClusterSets Creating a ManagedClusterSetBinding resource Removing a cluster from a ManagedClusterSet 1.6.14.1. Creating a ManagedClusterSet You can group managed clusters together in a managed cluster set to limit the user access on managed clusters. Required access: Cluster administrator A ManagedClusterSet is a cluster-scoped resource, so you must have cluster administration permissions for the cluster where you are creating the ManagedClusterSet . A managed cluster cannot be included in more than one ManagedClusterSet . You can create a managed cluster set from either the multicluster engine operator console or from the CLI. Note: Cluster pools that are not added to a managed cluster set are not added to the default ManagedClusterSet resource. After a cluster is claimed from the cluster pool, the cluster is added to the default ManagedClusterSet . When you create a managed cluster, the following are automatically created to ease management: A ManagedClusterSet called global . The namespace called open-cluster-management-global-set . A ManagedClusterSetBinding called global to bind the global ManagedClusterSet to the open-cluster-management-global-set namespace. Important: You cannot delete, update, or edit the global managed cluster set. The global managed cluster set includes all managed clusters. See the following example: apiVersion: cluster.open-cluster-management.io/v1beta2 kind: ManagedClusterSetBinding metadata: name: global namespace: open-cluster-management-global-set spec: clusterSet: global 1.6.14.1.1. Creating a ManagedClusterSet by using the CLI Add the following definition of the managed cluster set to your YAML file to create a managed cluster set by using the CLI: apiVersion: cluster.open-cluster-management.io/v1beta2 kind: ManagedClusterSet metadata: name: <cluster_set> Replace <cluster_set> with the name of your managed cluster set. 1.6.14.1.2. Adding a cluster to a ManagedClusterSet After you create your ManagedClusterSet , you can add clusters to your managed cluster set by either following the instructions in the console or by using the CLI. 1.6.14.1.3. Adding clusters to a ManagedClusterSet by using the CLI Complete the following steps to add a cluster to a managed cluster set by using the CLI: Ensure that there is an RBAC ClusterRole entry that allows you to create on a virtual subresource of managedclustersets/join . Note: Without this permission, you cannot assign a managed cluster to a ManagedClusterSet . If this entry does not exist, add it to your YAML file. See the following example: kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: name: clusterrole1 rules: - apiGroups: ["cluster.open-cluster-management.io"] resources: ["managedclustersets/join"] resourceNames: ["<cluster_set>"] verbs: ["create"] Replace <cluster_set> with the name of your ManagedClusterSet . Note: If you are moving a managed cluster from one ManagedClusterSet to another, you must have that permission available on both managed cluster sets. Find the definition of the managed cluster in the YAML file. See the following example definition: apiVersion: cluster.open-cluster-management.io/v1 kind: ManagedCluster metadata: name: <cluster_name> spec: hubAcceptsClient: true Add the cluster.open-cluster-management.io/clusterset paremeter and specify the name of the ManagedClusterSet . See the following example: apiVersion: cluster.open-cluster-management.io/v1 kind: ManagedCluster metadata: name: <cluster_name> labels: cluster.open-cluster-management.io/clusterset: <cluster_set> spec: hubAcceptsClient: true 1.6.14.2. Assigning RBAC permissions to a ManagedClusterSet You can assign users or groups to your cluster set that are provided by the configured identity providers on the hub cluster. Required access: Cluster administrator See the following table for the three ManagedClusterSet API RBAC permission levels: Cluster set Access permissions Create permissions admin Full access permission to all of the cluster and cluster pool resources that are assigned to the managed cluster set. Permission to create clusters, import clusters, and create cluster pools. The permissions must be assigned to the managed cluster set when it is created. bind Permission to bind the cluster set to a namespace by creating a ManagedClusterSetBinding . The user or group must also have permission to create the ManagedClusterSetBinding in the target namespace. Read only permissions to all of the cluster and cluster pool resources that are assigned to the managed cluster set. No permission to create clusters, import clusters, or create cluster pools. view Read only permission to all of the cluster and cluster pool resources that are assigned to the managed cluster set. No permission to create clusters, import clusters, or create cluster pools. Note: You cannot apply the Cluster set admin permission for the global cluster set. Complete the following steps to assign users or groups to your managed cluster set from the console: From the OpenShift Container Platform console, navigate to Infrastructure > Clusters . Select the Cluster sets tab. Select your target cluster set. Select the Access management tab. Select Add user or group . Search for, and select the user or group that you want to provide access. Select the Cluster set admin or Cluster set view role to give to the selected user or user group. See Overview of roles in multicluster engine operator Role-based access control for more information. Select Add to submit the changes. Your user or group is displayed in the table. It might take a few seconds for the permission assignments for all of the managed cluster set resources to be propagated to your user or group. See Filtering ManagedClusters from ManagedCusterSets for placement information. 1.6.14.3. Creating a ManagedClusterSetBinding resource A ManagedClusterSetBinding resource binds a ManagedClusterSet resource to a namespace. Applications and policies that are created in the same namespace can only access clusters that are included in the bound managed cluster set resource. Access permissions to the namespace automatically apply to a managed cluster set that is bound to that namespace. If you have access permissions to that namespace, you automatically have permissions to access any managed cluster set that is bound to that namespace. If you only have permissions to access the managed cluster set, you do not automatically have permissions to access other managed cluster sets on the namespace. You can create a managed cluster set binding by using the console or the command line. 1.6.14.3.1. Creating a ManagedClusterSetBinding by using the console Complete the following steps to create a ManagedClusterSetBinding by using the console: From the OpenShift Container Platform console, navigate to Infrastructure > Clusters and select the Cluster sets tab. Select the name of the cluster set that you want to create a binding for. Navigate to Actions > Edit namespace bindings . On the Edit namespace bindings page, select the namespace to which you want to bind the cluster set from the drop-down menu. 1.6.14.3.2. Creating a ManagedClusterSetBinding by using the CLI Complete the following steps to create a ManagedClusterSetBinding by using the CLI: Create the ManagedClusterSetBinding resource in your YAML file. Note: When you create a managed cluster set binding, the name of the managed cluster set binding must match the name of the managed cluster set to bind. Your ManagedClusterSetBinding resource might resemble the following information: apiVersion: cluster.open-cluster-management.io/v1beta2 kind: ManagedClusterSetBinding metadata: namespace: <namespace> name: <cluster_set> spec: clusterSet: <cluster_set> Ensure that you have the bind permission on the target managed cluster set. View the following example of a ClusterRole resource, which contains rules that allow the user to bind to <cluster_set> : apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: <clusterrole> rules: - apiGroups: ["cluster.open-cluster-management.io"] resources: ["managedclustersets/bind"] resourceNames: ["<cluster_set>"] verbs: ["create"] 1.6.14.4. Placing managed clusters by using taints and tolerations You can control the placement of your managed clusters or managed cluster sets by using taints and tolerations. Taints and tolerations provide a way to prevent managed clusters from being selected for certain placements. This control can be helpful if you want to prevent certain managed clusters from being included in some placements. You can add a taint to the managed cluster, and add a toleration to the placement. If the taint and the toleration do not match, then the managed cluster is not selected for that placement. 1.6.14.4.1. Adding a taint to a managed cluster Taints are specified in the properties of a managed cluster and allow a placement to repel a managed cluster or a set of managed clusters. If the taints section does not exist, you can add a taint to a managed cluster by running a command that resembles the following example: oc patch managedcluster <managed_cluster_name> -p '{"spec":{"taints":[{"key": "key", "value": "value", "effect": "NoSelect"}]}}' --type=merge Alternatively, you can append a taint to existing taints by running a command similar to the following example: oc patch managedcluster <managed_cluster_name> --type='json' -p='[{"op": "add", "path": "/spec/taints/-", "value": {"key": "key", "value": "value", "effect": "NoSelect"}}]' The specification of a taint includes the following fields: Required Key - The taint key that is applied to a cluster. This value must match the value in the toleration for the managed cluster to meet the criteria for being added to that placement. You can determine this value. For example, this value could be bar or foo.example.com/bar . Optional Value - The taint value for the taint key. This value must match the value in the toleration for the managed cluster to meet the criteria for being added to that placement. For example, this value could be value . Required Effect - The effect of the taint on placements that do not tolerate the taint, or what occurs when the taint and the toleration of the placement do not match. The value of the effects must be one of the following values: NoSelect - Placements are not allowed to select a cluster unless they tolerate this taint. If the cluster was selected by the placement before the taint was set, the cluster is removed from the placement decision. NoSelectIfNew - The scheduler cannot select the cluster if it is a new cluster. Placements can only select the cluster if they tolerate the taint and already have the cluster in their cluster decisions. Required TimeAdded - The time when the taint was added. This value is automatically set. 1.6.14.4.2. Identifying built-in taints to reflect the status of managed clusters When a managed cluster is not accessible, you do not want the cluster added to a placement. The following taints are automatically added to managed clusters that are not accessible: cluster.open-cluster-management.io/unavailable - This taint is added to a managed cluster when the cluster has a condition of ManagedClusterConditionAvailable with status of False . The taint has the effect of NoSelect and an empty value to prevent an unavailable cluster from being scheduled. An example of this taint is provided in the following content: apiVersion: cluster.open-cluster-management.io/v1 kind: ManagedCluster metadata: name: cluster1 spec: hubAcceptsClient: true taints: - effect: NoSelect key: cluster.open-cluster-management.io/unavailable timeAdded: '2022-02-21T08:11:54Z' cluster.open-cluster-management.io/unreachable - This taint is added to a managed cluster when the status of the condition for ManagedClusterConditionAvailable is either Unknown or has no condition. The taint has effect of NoSelect and an empty value to prevent an unreachable cluster from being scheduled. An example of this taint is provided in the following content: apiVersion: cluster.open-cluster-management.io/v1 kind: ManagedCluster metadata: name: cluster1 spec: hubAcceptsClient: true taints: - effect: NoSelect key: cluster.open-cluster-management.io/unreachable timeAdded: '2022-02-21T08:11:06Z' 1.6.14.4.3. Adding a toleration to a placement Tolerations are applied to placements, and allow the placements to repel managed clusters that do not have taints that match the tolerations of the placement. The specification of a toleration includes the following fields: Optional Key - The key matches the taint key to allow the placement. Optional Value - The value in the toleration must match the value of the taint for the toleration to allow the placement. Optional Operator - The operator represents the relationship between a key and a value. Valid operators are equal and exists . The default value is equal . A toleration matches a taint when the keys are the same, the effects are the same, and the operator is one of the following values: equal - The operator is equal and the values are the same in the taint and the toleration. exists - The wildcard for value, so a placement can tolerate all taints of a particular category. Optional Effect - The taint effect to match. When left empty, it matches all taint effects. The allowed values when specified are NoSelect or NoSelectIfNew . Optional TolerationSeconds - The length of time, in seconds, that the toleration tolerates the taint before moving the managed cluster to a new placement. If the effect value is not NoSelect or PreferNoSelect , this field is ignored. The default value is nil , which indicates that there is no time limit. The starting time of the counting of the TolerationSeconds is automatically listed as the TimeAdded value in the taint, rather than in the value of the cluster scheduled time or the TolerationSeconds added time. The following example shows how to configure a toleration that tolerates clusters that have taints: Taint on the managed cluster for this example: apiVersion: cluster.open-cluster-management.io/v1 kind: ManagedCluster metadata: name: cluster1 spec: hubAcceptsClient: true taints: - effect: NoSelect key: gpu value: "true" timeAdded: '2022-02-21T08:11:06Z' Toleration on the placement that allows the taint to be tolerated apiVersion: cluster.open-cluster-management.io/v1beta1 kind: Placement metadata: name: placement1 namespace: default spec: tolerations: - key: gpu value: "true" operator: Equal With the example tolerations defined, cluster1 could be selected by the placement because the key: gpu and value: "true" match. Note: A managed cluster is not guaranteed to be placed on a placement that contains a toleration for the taint. If other placements contain the same toleration, the managed cluster might be placed on one of those placements. 1.6.14.4.4. Specifying a temporary toleration The value of TolerationSeconds specifies the period of time that the toleration tolerates the taint. This temporary toleration can be helpful when a managed cluster is offline and you can transfer applications that are deployed on this cluster to another managed cluster for a tolerated time. For example, the managed cluster with the following taint becomes unreachable: apiVersion: cluster.open-cluster-management.io/v1 kind: ManagedCluster metadata: name: cluster1 spec: hubAcceptsClient: true taints: - effect: NoSelect key: cluster.open-cluster-management.io/unreachable timeAdded: '2022-02-21T08:11:06Z' If you define a placement with a value for TolerationSeconds , as in the following example, the workload transfers to another available managed cluster after 5 minutes. apiVersion: cluster.open-cluster-management.io/v1beta1 kind: Placement metadata: name: demo4 namespace: demo1 spec: tolerations: - key: cluster.open-cluster-management.io/unreachable operator: Exists tolerationSeconds: 300 The application is moved to another managed cluster after the managed cluster is unreachable for 5 minutes. 1.6.14.4.5. Additional resources To learn more about taints and tolerations, see Using taints and tolerations to control logging pod placement in the OpenShift Container Platform documentation. To learn how to use oc patch , see oc patch the OpenShift Container Platform documentation 1.6.14.5. Removing a managed cluster from a ManagedClusterSet You might want to remove a managed cluster from a managed cluster set to move it to a different managed cluster set, or remove it from the management settings of the set. You can remove a managed cluster from a managed cluster set by using the console or the CLI. Notes: Every managed cluster must be assigned to a managed cluster set. If you remove a managed cluster from a ManagedClusterSet and do not assign it to a different ManagedClusterSet , the cluster is automatically added to the default managed cluster set. If the Submariner add-on is installed on your managed cluster, you must uninstall the add-on before removing your managed cluster from a ManagedClusterSet . 1.6.14.5.1. Removing a cluster from a ManagedClusterSet by using the console Complete the following steps to remove a cluster from a managed cluster set by using the console: Click Infrastructure > Clusters and ensure that the Cluster sets tab is selected. Select the name of the cluster set that you want to remove from the managed cluster set to view the cluster set details. Select Actions > Manage resource assignments . On the Manage resource assignments page, remove the checkbox for the resources that you want to remove from the cluster set. This step removes a resource that is already a member of the cluster set. You can see if the resource is already a member of a cluster set by viewing the details of the managed cluster. Note: If you are moving a managed cluster from one managed cluster set to another, you must have the required RBAC permissions on both managed cluster sets. 1.6.14.5.2. Removing a cluster from a ManagedClusterSet by using the CLI To remove a cluster from a managed cluster set by using the command line, complete the following steps: Run the following command to display a list of managed clusters in the managed cluster set: Replace cluster_set with the name of the managed cluster set. Locate the entry for the cluster that you want to remove. Remove the label from the YAML entry for the cluster that you want to remove. See the following code for an example of the label: labels: cluster.open-cluster-management.io/clusterset: clusterset1 Note: If you are moving a managed cluster from one cluster set to another, you must have the required RBAC permission on both managed cluster sets. 1.6.15. Placement A placement resource is a namespace-scoped resource that defines a rule to select a set of ManagedClusters from the ManagedClusterSets , which are bound to the placement namespace. Required access: Cluster administrator, Cluster set administrator Continue reading to learn more about how to use placements: Placement overview Filtering ManagedClusters from ManagedCusterSets Checking selected ManagedClusters by using PlacementDecisions 1.6.15.1. Placement overview See the following information about how placement with managed clusters works: Kubernetes clusters are registered with the hub cluster as cluster-scoped ManagedClusters . The ManagedClusters are organized into cluster-scoped ManagedClusterSets . The ManagedClusterSets are bound to workload namespaces. The namespace-scoped placements specify a portion of ManagedClusterSets that select a working set of the potential ManagedClusters . Placements filter ManagedClusters from ManagedClusterSets by using labelSelector and claimSelector . The placement of ManagedClusters can be controlled by using taints and tolerations. Placements rank the clusters by the requirements and select a subset of clusters from them. Placements do not select managed clusters that you are deleting. Notes: You must bind at least one ManagedClusterSet to a namespace by creating a ManagedClusterSetBinding in that namespace. You must have role-based access to CREATE on the virtual sub-resource of managedclustersets/bind . 1.6.15.1.1. Additional resources See Using taints and tolerations to place managed clusters for more information. See Placements API to learn more about the API. Return to Selecting ManagedClusters with placement . 1.6.15.2. Filtering ManagedClusters from ManagedClusterSets You can select which ManagedClusters to filter by using labelSelector or claimSelector . See the following examples to learn how to use both filters: In the following example, the labelSelector only matches clusters with the label vendor: OpenShift : apiVersion: cluster.open-cluster-management.io/v1beta1 kind: Placement metadata: name: placement namespace: ns1 spec: predicates: - requiredClusterSelector: labelSelector: matchLabels: vendor: OpenShift In the following example, claimSelector only matches clusters with region.open-cluster-management.io with us-west-1 : apiVersion: cluster.open-cluster-management.io/v1beta1 kind: Placement metadata: name: placement namespace: ns1 spec: predicates: - requiredClusterSelector: claimSelector: matchExpressions: - key: region.open-cluster-management.io operator: In values: - us-west-1 You can also filter ManagedClusters from particular cluster sets by using the clusterSets parameter. In the following example, claimSelector only matches the cluster sets clusterset1 and clusterset2 : apiVersion: cluster.open-cluster-management.io/v1beta1 kind: Placement metadata: name: placement namespace: ns1 spec: clusterSets: - clusterset1 - clusterset2 predicates: - requiredClusterSelector: claimSelector: matchExpressions: - key: region.open-cluster-management.io operator: In values: - us-west-1 You can also choose how many ManagedClusters you want to filter by using the numberOfClusters paremeter. See the following example: apiVersion: cluster.open-cluster-management.io/v1beta1 kind: Placement metadata: name: placement namespace: ns1 spec: numberOfClusters: 3 1 predicates: - requiredClusterSelector: labelSelector: matchLabels: vendor: OpenShift claimSelector: matchExpressions: - key: region.open-cluster-management.io operator: In values: - us-west-1 1 Specify how many ManagedClusters you want to select. The example is set to 3 . 1.6.15.2.1. Filtering ManagedClusters by defining tolerations with placement To learn how to filter ManagedClusters with matching taints, see the following examples: By default, the placement cannot select cluster1 in the following example: apiVersion: cluster.open-cluster-management.io/v1 kind: ManagedCluster metadata: name: cluster1 spec: hubAcceptsClient: true taints: - effect: NoSelect key: gpu value: "true" timeAdded: '2022-02-21T08:11:06Z' To select cluster1 you must define tolerations. See the following example: apiVersion: cluster.open-cluster-management.io/v1beta1 kind: Placement metadata: name: placement namespace: ns1 spec: tolerations: - key: gpu value: "true" operator: Equal You can also select ManagedClusters with matching taints for a specified amount of time by using the tolerationSeconds parameter. tolerationSeconds defines how long a toleration stays bound to a taint. tolerationSeconds can automatically transfer applications that are deployed on a cluster that goes offline to another managed cluster after a specified length of time. Learn how to use tolerationSeconds by viewing the following examples: In the following example, the managed cluster becomes unreachable: apiVersion: cluster.open-cluster-management.io/v1 kind: ManagedCluster metadata: name: cluster1 spec: hubAcceptsClient: true taints: - effect: NoSelect key: cluster.open-cluster-management.io/unreachable timeAdded: '2022-02-21T08:11:06Z' If you define a placement with tolerationSeconds , the workload is transferred to another available managed cluster. See the following example: apiVersion: cluster.open-cluster-management.io/v1beta1 kind: Placement metadata: name: placement namespace: ns1 spec: tolerations: - key: cluster.open-cluster-management.io/unreachable operator: Exists tolerationSeconds: 300 1 1 Specify after how many seconds you want the workload to be transferred. 1.6.15.2.2. Prioritizing ManagedClusters by defining prioritizerPolicy with placement View the following examples to learn how to prioritize ManagedClusters by using the prioritizerPolicy parameter with placement. The following example selects a cluster with the largest allocatable memory: Note: Similar to Kubernetes Node Allocatable , 'allocatable' is defined as the amount of compute resources that are available for pods on each cluster. apiVersion: cluster.open-cluster-management.io/v1beta1 kind: Placement metadata: name: placement namespace: ns1 spec: numberOfClusters: 1 prioritizerPolicy: configurations: - scoreCoordinate: builtIn: ResourceAllocatableMemory The following example selects a cluster with the largest allocatable CPU and memory, and makes placement sensitive to resource changes: apiVersion: cluster.open-cluster-management.io/v1beta1 kind: Placement metadata: name: placement namespace: ns1 spec: numberOfClusters: 1 prioritizerPolicy: configurations: - scoreCoordinate: builtIn: ResourceAllocatableCPU weight: 2 - scoreCoordinate: builtIn: ResourceAllocatableMemory weight: 2 The following example selects two clusters with the largest addOn score CPU ratio, and pins the placement decisions: apiVersion: cluster.open-cluster-management.io/v1beta1 kind: Placement metadata: name: placement namespace: ns1 spec: numberOfClusters: 2 prioritizerPolicy: mode: Exact configurations: - scoreCoordinate: builtIn: Steady weight: 3 - scoreCoordinate: type: AddOn addOn: resourceName: default scoreName: cpuratio 1.6.15.2.3. Filtering ManagedClusters based on add-on status You might want to select managed clusters for your placements based on the status of the add-ons that are deployed on them. For example, you can select a managed cluster for your placement only if there is a specific add-on that is enabled on the managed cluster. You can specify the label for the add-on, as well as its status, when you create the placement. A label is automatically created on a ManagedCluster resource if an add-on is enabled on the managed cluster. The label is automatically removed if the add-on is disabled. Each add-on is represented by a label in the format of feature.open-cluster-management.io/addon-<addon_name>=<status_of_addon> . Replace addon_name with the name of the add-on that you want to enable on the selected managed cluster. Replace status_of_addon with the status that you want the add-on to have if the managed cluster is selected. See the following table of possible value for status_of_addon : Value Description available The add-on is enabled and available. unhealthy The add-on is enabled, but the lease is not updated continuously. unreachable The add-on is enabled, but there is no lease found for it. This can also be caused when the managed cluster is offline. For example, an available application-manager add-on is represented by a label on the managed cluster that reads the following: See the following examples to learn how to create placements based on add-ons and their status: The following placement example includes all managed clusters that have application-manager enabled on them: apiVersion: cluster.open-cluster-management.io/v1beta1 kind: Placement metadata: name: placement1 namespace: ns1 spec: predicates: - requiredClusterSelector: labelSelector: matchExpressions: - key: feature.open-cluster-management.io/addon-application-manager operator: Exists The following placement example includes all managed clusters that have application-manager enabled with an available status: apiVersion: cluster.open-cluster-management.io/v1beta1 kind: Placement metadata: name: placement2 namespace: ns1 spec: predicates: - requiredClusterSelector: labelSelector: matchLabels: "feature.open-cluster-management.io/addon-application-manager": "available" The following placement example includes all managed clusters that have application-manager disabled: apiVersion: cluster.open-cluster-management.io/v1beta1 kind: Placement metadata: name: placement3 namespace: ns1 spec: predicates: - requiredClusterSelector: labelSelector: matchExpressions: - key: feature.open-cluster-management.io/addon-application-manager operator: DoesNotExist 1.6.15.2.4. Additional resources See Node Allocatable for more details. Return to Selecting ManagedClusters with placement for other topics. 1.6.15.3. Checking selected ManagedClusters by using PlacementDecisions One or more PlacementDecision kinds with the label cluster.open-cluster-management.io/placement={placement_name} are created to represent ManagedClusters selected by a placement. If a ManagedCluster is selected and added to a PlacementDecision , components that consume this placement might apply the workload on this ManagedCluster . After the ManagedCluster is no longer selected and is removed from the PlacementDecision , the workload that is applied on this ManagedCluster is removed. See PlacementDecisions API to learn more about the API. See the following PlacementDecision example: apiVersion: cluster.open-cluster-management.io/v1beta1 kind: PlacementDecision metadata: labels: cluster.open-cluster-management.io/placement: placement1 name: placement1-kbc7q namespace: ns1 ownerReferences: - apiVersion: cluster.open-cluster-management.io/v1beta1 blockOwnerDeletion: true controller: true kind: Placement name: placement1 uid: 05441cf6-2543-4ecc-8389-1079b42fe63e status: decisions: - clusterName: cluster1 reason: '' - clusterName: cluster2 reason: '' - clusterName: cluster3 reason: '' 1.6.15.3.1. Additional resources See PlacementDecisions API fore more details. 1.6.16. Managing cluster pools (Technology Preview) Cluster pools provide rapid and cost-effective access to configured Red Hat OpenShift Container Platform clusters on-demand and at scale. Cluster pools provision a configurable and scalable number of OpenShift Container Platform clusters on Amazon Web Services, Google Cloud Platform, or Microsoft Azure that can be claimed when they are needed. They are especially useful when providing or replacing cluster environments for development, continuous integration, and production scenarios. You can specify a number of clusters to keep running so that they are available to be claimed immediately, while the remainder of the clusters will be kept in a hibernating state so that they can be resumed and claimed within a few minutes. ClusterClaim resources are used to check out clusters from cluster pools. When a cluster claim is created, the pool assigns a running cluster to it. If no running clusters are available, a hibernating cluster is resumed to provide the cluster or a new cluster is provisioned. The cluster pool automatically creates new clusters and resumes hibernating clusters to maintain the specified size and number of available running clusters in the pool. Creating a cluster pool Claiming clusters from cluster pools Updating the cluster pool release image Scaling cluster pools Destroying a cluster pool The procedure for creating a cluster pool is similar to the procedure for creating a cluster. Clusters in a cluster pool are not created for immediate use. 1.6.16.1. Creating a cluster pool The procedure for creating a cluster pool is similar to the procedure for creating a cluster. Clusters in a cluster pool are not created for immediate use. Required access : Administrator 1.6.16.1.1. Prerequisites See the following prerequisites before creating a cluster pool: You need to deploy a multicluster engine operator hub cluster. You need Internet access for your multicluster engine operator hub cluster so that it can create the Kubernetes cluster on the provider environment. You need an AWS, GCP, or Microsoft Azure provider credential. See Managing credentials overview for more information. You need a configured domain in your provider environment. See your provider documentation for instructions about how to configure a domain. You need provider login credentials. You need your OpenShift Container Platform image pull secret. See Using image pull secrets . Note: Adding a cluster pool with this procedure configures it so it automatically imports the cluster for multicluster engine operator management when you claim a cluster from the pool. If you want to create a cluster pool that does not automatically import the claimed cluster for management with the cluster claim, add the following annotation to your clusterClaim resource: kind: ClusterClaim metadata: annotations: cluster.open-cluster-management.io/createmanagedcluster: "false" 1 1 The word "false" must be surrounded by quotation marks to indicate that it is a string. 1.6.16.1.2. Create the cluster pool To create a cluster pool, select Infrastructure > Clusters in the navigation menu. The Cluster pools tab lists the cluster pools that you can access. Select Create cluster pool and complete the steps in the console. If you do not have a infrastructure credential that you want to use for the cluster pool, you can create one by selecting Add credential . You can either select an existing namespace from the list, or type the name of a new one to create one. The cluster pool does not have to be in the same namespace as the clusters. You can select a cluster set name if you want the RBAC roles for your cluster pool to share the role assignments of an existing cluster set. The cluster set for the clusters in the cluster pool can only be set when you create the cluster pool. You cannot change the cluster set association for the cluster pool or for the clusters in the cluster pool after you create the cluster pool. Any cluster that you claim from the cluster pool is automatically added to the same cluster set as the cluster pool. Note: If you do not have cluster admin permissions, you must select a cluster set. The request to create a cluster set is rejected with a forbidden error if you do not include the cluster set name in this situation. If no cluster sets are available for you to select, contact your cluster administrator to create a cluster set and give you clusterset admin permissions to it. The cluster pool size specifies the number of clusters that you want provisioned in your cluster pool, while the cluster pool running count specifies the number of clusters that the pool keeps running and ready to claim for immediate use. The procedure is very similar to the procedure for creating clusters. For specific information about the information that is required for your provider, see the following information: Creating a cluster on Amazon Web Services Creating a cluster on Google Cloud Platform Creating a cluster on Microsoft Azure 1.6.16.2. Claiming clusters from cluster pools ClusterClaim resources are used to check out clusters from cluster pools. A claim is completed when a cluster is running and ready in the cluster pool. The cluster pool automatically creates new running and hibernated clusters in the cluster pool to maintain the requirements that are specified for the cluster pool. Note: When a cluster that was claimed from the cluster pool is no longer needed and is destroyed, the resources are deleted. The cluster does not return to the cluster pool. Required access : Administrator 1.6.16.2.1. Prerequisite You must have the following available before claiming a cluster from a cluster pool: A cluster pool with or without available clusters. If there are available clusters in the cluster pool, the available clusters are claimed. If there are no available clusters in the cluster pool, a cluster is created to fulfill the claim. See Creating a cluster pool for information about how to create a cluster pool. 1.6.16.2.2. Claim the cluster from the cluster pool When you create a cluster claim, you request a new cluster from the cluster pool. A cluster is checked out from the pool when a cluster is available. The claimed cluster is automatically imported as one of your managed clusters, unless you disabled automatic import. Complete the following steps to claim a cluster: From the navigation menu, click Infrastructure > Clusters , and select the Cluster pools tab. Find the name of the cluster pool you want to claim a cluster from and select Claim cluster . If a cluster is available, it is claimed and immediately appears in the Managed clusters tab. If there are no available clusters, it might take several minutes to resume a hibernated cluster or provision a new cluster. During this time, the claim status is pending . Expand the cluster pool to view or delete pending claims against it. The claimed cluster remains a member of the cluster set that it was associated with when it was in the cluster pool. You cannot change the cluster set of the claimed cluster when you claim it. Note: Changes to the pull secret, SSH keys, or base domain of the cloud provider credentials are not reflected for existing clusters that are claimed from a cluster pool, as they have already been provisioned using the original credentials. You cannot edit cluster pool information by using the console, but you can update it by updating its information using the CLI interface. You can also create a new cluster pool with a credential that contains the updated information. The clusters that are created in the new pool use the settings provided in the new credential. 1.6.16.3. Updating the cluster pool release image When the clusters in your cluster pool remain in hibernation for some time, the Red Hat OpenShift Container Platform release image of the clusters might become backlevel. If this happens, you can upgrade the version of the release image of the clusters that are in your cluster pool. Required access : Edit Complete the following steps to update the OpenShift Container Platform release image for the clusters in your cluster pool: Note: This procedure does not update clusters from the cluster pool that are already claimed in the cluster pool. After you complete this procedure, the updates to the release images only apply to the following clusters that are related to the cluster pool: Clusters that are created by the cluster pool after updating the release image with this procedure. Clusters that are hibernating in the cluster pool. The existing hibernating clusters with the old release image are destroyed, and new clusters with the new release image replace them. From the navigation menu, click Infrastructure > Clusters . Select the Cluster pools tab. Find the name of the cluster pool that you want to update in the Cluster pools table. Click the Options menu for the Cluster pools in the table, and select Update release image . Select a new release image to use for future cluster creations from this cluster pool. The cluster pool release image is updated. Tip: You can update the release image for multiple cluster pools with one action by selecting the box for each of the cluster pools and using the Actions menu to update the release image for the selected cluster pools. 1.6.16.4. Scaling cluster pools (Technology Preview) You can change the number of clusters in the cluster pool by increasing or decreasing the number of clusters in the cluster pool size. Required access : Cluster administrator Complete the following steps to change the number of clusters in your cluster pool: From the navigation menu, click Infrastructure > Clusters . Select the Cluster pools tab. In the Options menu for the cluster pool that you want to change, select Scale cluster pool . Change the value of the pool size. Optionally, you can update the number of running clusters to increase or decrease the number of clusters that are immediately available when you claim them. Your cluster pools are scaled to reflect your new values. 1.6.16.5. Destroying a cluster pool If you created a cluster pool and determine that you no longer need it, you can destroy the cluster pool. Important: You can only destroy cluster pools that do not have any cluster claims. Required access : Cluster administrator To destroy a cluster pool, complete the following steps: From the navigation menu, click Infrastructure > Clusters . Select the Cluster pools tab. In the Options menu for the cluster pool that you want to delete, type confirm in the confirmation box and select Destroy . Notes: The Destroy button is disabled if the cluster pool has any cluster claims. The namespace that contains the cluster pool is not deleted. Deleting the namespace destroys any clusters that have been claimed from the cluster pool, since the cluster claim resources for these clusters are created in the same namespace. Tip: You can destroy multiple cluster pools with one action by selecting the box for each of the cluster pools and using the Actions menu to destroy the selected cluster pools. 1.6.17. Enabling ManagedServiceAccount add-ons When you install a supported version of multicluster engine operator, the ManagedServiceAccount add-on is enabled by default. Important: If you upgraded your hub cluster from multicluster engine operator version 2.4 and did not enable the ManagedServiceAccount add-on before upgrading, you must enable the add-on manually. The ManagedServiceAccount allows you to create or delete a service account on a managed cluster. Required access: Editor When a ManagedServiceAccount custom resource is created in the <managed_cluster> namespace on the hub cluster, a ServiceAccount is created on the managed cluster. A TokenRequest is made with the ServiceAccount on the managed cluster to the Kubernetes API server on the managed cluster. The token is then stored in a Secret in the <target_managed_cluster> namespace on the hub cluster. Note: The token can expire and be rotated. See TokenRequest for more information about token requests. 1.6.17.1. Prerequisites You need a supported Red Hat OpenShift Container Platform environment. You need the multicluster engine operator installed. 1.6.17.2. Enabling ManagedServiceAccount To enable a ManagedServiceAccount add-on for a hub cluster and a managed cluster, complete the following steps: Enable the ManagedServiceAccount add-on on hub cluster. See Advanced configuration to learn more. Deploy the ManagedServiceAccount add-on and apply it to your target managed cluster. Create the following YAML file and replace target_managed_cluster with the name of the managed cluster where you are applying the Managed-ServiceAccount add-on: apiVersion: addon.open-cluster-management.io/v1alpha1 kind: ManagedClusterAddOn metadata: name: managed-serviceaccount namespace: <target_managed_cluster> spec: installNamespace: open-cluster-management-agent-addon Run the following command to apply the file: You have now enabled the ManagedServiceAccount plug-in for your managed cluster. See the following steps to configure a ManagedServiceAccount . Create a ManagedServiceAccount custom resource with the following YAML source: apiVersion: authentication.open-cluster-management.io/v1alpha1 kind: ManagedServiceAccount metadata: name: <managedserviceaccount_name> namespace: <target_managed_cluster> spec: rotation: {} Replace managed_serviceaccount_name with the name of your ManagedServiceAccount . Replace target_managed_cluster with the name of the managed cluster to which you are applying the ManagedServiceAccount . To verify, view the tokenSecretRef attribute in the ManagedServiceAccount object status to find the secret name and namespace. Run the following command with your account and cluster name: View the Secret containing the retrieved token that is connected to the created ServiceAccount on the managed cluster. Run the following command: 1.6.18. Cluster lifecycle advanced configuration You can configure some cluster settings during or after installation. 1.6.18.1. Customizing API server certificates The managed clusters communicate with the hub cluster through a mutual connection with the OpenShift Kube API server external load balancer. The default OpenShift Kube API server certificate is issued by an internal Red Hat OpenShift Container Platform cluster certificate authority (CA) when OpenShift Container Platform is installed. If necessary, you can add or change certificates. Changing the API server certificate might impact the communication between the managed cluster and the hub cluster. When you add the named certificate before installing the product, you can avoid an issue that might leave your managed clusters in an offline state. The following list contains some examples of when you might need to update your certificates: You want to replace the default API server certificate for the external load balancer with your own certificate. By following the guidance in Adding API server certificates in the OpenShift Container Platform documentation, you can add a named certificate with host name api.<cluster_name>.<base_domain> to replace the default API server certificate for the external load balancer. Replacing the certificate might cause some of your managed clusters to move to an offline state. If your clusters are in an offline state after upgrading the certificates, follow the troubleshooting instructions for Troubleshooting imported clusters offline after certificate change to resolve it. Note: Adding the named certificate before installing the product helps to avoid your clusters moving to an offline state. The named certificate for the external load balancer is expiring and you need to replace it. If both the old and the new certificate share the same root CA certificate, despite the number of intermediate certificates, you can follow the guidance in Adding API server certificates in the OpenShift Container Platform documentation to create a new secret for the new certificate. Then update the serving certificate reference for host name api.<cluster_name>.<base_domain> to the new secret in the APIServer custom resource. Otherwise, when the old and new certificates have different root CA certificates, complete the following steps to replace the certificate: Locate your APIServer custom resource, which resembles the following example: apiVersion: config.openshift.io/v1 kind: APIServer metadata: name: cluster spec: audit: profile: Default servingCerts: namedCertificates: - names: - api.mycluster.example.com servingCertificate: name: old-cert-secret Create a new secret in the openshift-config namespace that contains the content of the existing and new certificates by running the following commands: Copy the old certificate into a new certificate: cp old.crt combined.crt Add the contents of the new certificate to the copy of the old certificate: cat new.crt >> combined.crt Apply the combined certificates to create a secret: oc create secret tls combined-certs-secret --cert=combined.crt --key=old.key -n openshift-config Update your APIServer resource to reference the combined certificate as the servingCertificate . apiVersion: config.openshift.io/v1 kind: APIServer metadata: name: cluster spec: audit: profile: Default servingCerts: namedCertificates: - names: - api.mycluster.example.com servingCertificate: name: combined-cert-secret After about 15 minutes, the CA bundle containing both new and old certificates is propagated to the managed clusters. Create another secret named new-cert-secret in the openshift-config namespace that contains only the new certificate information by entering the following command: oc create secret tls new-cert-secret --cert=new.crt --key=new.key -n openshift-config {code} Update the APIServer resource by changing the name of servingCertificate to reference the new-cert-secret . Your resource might resemble the following example: apiVersion: config.openshift.io/v1 kind: APIServer metadata: name: cluster spec: audit: profile: Default servingCerts: namedCertificates: - names: - api.mycluster.example.com servingCertificate: name: new-cert-secret After about 15 minutes, the old certificate is removed from the CA bundle, and the change is automatically propagated to the managed clusters. Note: Managed clusters must use the host name api.<cluster_name>.<base_domain> to access the hub cluster. You cannot use named certificates that are configured with other host names. 1.6.18.2. Configuring the proxy between hub cluster and managed cluster To register a managed cluster to your multicluster engine for Kubernetes operator hub cluster, you need to transport the managed cluster to your multicluster engine operator hub cluster. Sometimes your managed cluster cannot directly reach your multicluster engine operator hub cluster. In this instance, configure the proxy settings to allow the communications from the managed cluster to access the multicluster engine operator hub cluster through a HTTP or HTTPS proxy server. For example, the multicluster engine operator hub cluster is in a public cloud, and the managed cluster is in a private cloud environment behind firewalls. The communications out of the private cloud can only go through a HTTP or HTTPS proxy server. 1.6.18.2.1. Prerequisites You have a HTTP or HTTPS proxy server running that supports HTTP tunnels. For example, HTTP connect method. You have a manged cluster that can reach the HTTP or HTTPS proxy server, and the proxy server can access the multicluster engine operator hub cluster. Complete the following steps to configure the proxy settings between hub cluster and managed cluster: Create a KlusterConfig resource with proxy settings. See the following configuration with HTTP proxy: apiVersion: config.open-cluster-management.io/v1alpha1 kind: KlusterletConfig metadata: name: http-proxy spec: hubKubeAPIServerProxyConfig: httpProxy: "http://<username>:<password>@<ip>:<port>" See the following configuration with HTTPS proxy: apiVersion: config.open-cluster-management.io/v1alpha1 kind: KlusterletConfig metadata: name: https-proxy spec: hubKubeAPIServerProxyConfig: httpsProxy: "https://<username>:<password>@<ip>:<port>" caBundle: <user-ca-bundle> Note: A CA certificate is required when the HTTPS proxy server is configured. The HTTPS proxy is used if both HTTP proxy and HTTPS proxy are specified. When creating a managed cluster, choose the KlusterletConfig resource by adding an annotation that refers to the KlusterletConfig resource. See the following example: apiVersion: cluster.open-cluster-management.io/v1 kind: ManagedCluster metadata: annotations: agent.open-cluster-management.io/klusterlet-config: <klusterlet-config-name> name:<managed-cluster-name> spec: hubAcceptsClient: true leaseDurationSeconds: 60 Notes: You might need to toggle the YAML view to add the annotation to the ManagedCluster resource when you operate on the multicluster engine operator console. You can use a global KlusterletConfig to enable the configuration on every managed cluster without using an annotation for binding. 1.6.18.2.2. Disabling the proxy between hub cluster and managed cluster If your development changes, you might need to disable the HTTP or HTTPS proxy. Go to ManagedCluster resource. Remove the annotation, agent.open-cluster-management.io/klusterlet-config . 1.6.18.2.3. Optional: Configuring the klusterlet to run on specific nodes When you create a cluster using Red Hat Advanced Cluster Management for Kubernetes, you can specify which nodes you want to run the managed cluster klusterlet to run on by configuring the nodeSelector and tolerations annotation for the managed cluster. Complete the following steps to configure these settings: Select the managed cluster that you want to update from the clusters page in the console. Set the YAML switch to On to view the YAML content. Note: The YAML editor is only available when importing or creating a cluster. To edit the managed cluster YAML definition after importing or creating, you must use the OpenShift Container Platform command-line interface or the Red Hat Advanced Cluster Management search feature. Add the nodeSelector annotation to the managed cluster YAML definition. The key for this annotation is: open-cluster-management/nodeSelector . The value of this annotation is a string map with JSON formatting. Add the tolerations entry to the managed cluster YAML definition. The key of this annotation is: open-cluster-management/tolerations . The value of this annotation represents a toleration list with JSON formatting. The resulting YAML might resemble the following example: apiVersion: cluster.open-cluster-management.io/v1 kind: ManagedCluster metadata: annotations: open-cluster-management/nodeSelector: '{"dedicated":"acm"}' open-cluster-management/tolerations: '[{"key":"dedicated","operator":"Equal","value":"acm","effect":"NoSchedule"}]' 1.6.18.3. Customizing the server URL and CA bundle of the hub cluster API server when importing a managed cluster (Technology Preview) You might not be able to register a managed cluster on your multicluster engine operator hub cluster if intermediate components exist between the managed cluster and the hub cluster. Example intermediate components include a Virtual IP, load balancer, reverse proxy, or API gateway. If you have an intermediate component, you must use a custom server URL and CA bundle for the hub cluster API server when importing a managed cluster. 1.6.18.3.1. Prerequisites You must configure the intermediate component so that the hub cluster API server is accessible for the managed cluster. If the intermediate component terminates the SSL connections between the managed cluster and hub cluster API server, you must bridge the SSL connections and pass the authentication information from the original requests to the back end of the hub cluster API server. You can use the User Impersonation feature of the Kubernetes API server to bridge the SSL connections. The intermediate component extracts the client certificate from the original requests, adds Common Name (CN) and Organization (O) of the certificate subject as impersonation headers, and then forwards the modified impersonation requests to the back end of the hub cluster API server. Note: If you bridge the SSL connections, the cluster proxy add-on does not work. 1.6.18.3.2. Customizing the server URL and hub cluster CA bundle To use a custom hub API server URL and CA bundle when importing a managed cluster, complete the following steps: Create a KlusterConfig resource with the custom hub cluster API server URL and CA bundle. See the following example: apiVersion: config.open-cluster-management.io/v1alpha1 kind: KlusterletConfig metadata: name: 1 spec: hubKubeAPIServerURL: "https://api.example.com:6443" 2 hubKubeAPIServerCABundle: "LS0tLS1CRU...LS0tCg==" 3 1 Add your klusterlet config name. 2 Add your custom server URL. 3 Add your custom CA bundle. Select the KlusterletConfig resource when creating a managed cluster by adding an annotation that refers to the resource. See the following example: apiVersion: cluster.open-cluster-management.io/v1 kind: ManagedCluster metadata: annotations: agent.open-cluster-management.io/klusterlet-config: 1 name: 2 spec: hubAcceptsClient: true leaseDurationSeconds: 60 1 Add your klusterlet config name. 2 Add your cluster name. Notes: If you use the console, you might need to enable the YAML view to add the annotation to the ManagedCluster resource. You can use a global KlusterletConfig to enable the configuration on every managed cluster without using an annotation for binding. 1.6.18.3.3. Configuring the global KlusterletConfig If you create a KlusterletConfig resource and set the name to global , the configurations in the global KlusterletConfig are automatically applied on every managed cluster. If you create another KlusterletConfig in an environment that has a global KlusterletConfig and bind it with a managed cluster, the value of your KlusterletConfig overrides the global KlusterletConfig value if you set different values for the same field. See the following example where the hubKubeAPIServerURL field has different values set in your KlusterletConfig and the global KlusterletConfig . The "example.test.com" value overrides the "example.global.com" value: apiVersion: config.open-cluster-management.io/v1alpha1 kind: KlusterletConfig metadata: name: test spec: hubKubeAPIServerURL: "example.test.com" - apiVersion: config.open-cluster-management.io/v1alpha1 kind: KlusterletConfig metadata: name: global spec: hubKubeAPIServerURL: "example.global.com" If you create another KlusterletConfig in an environment that has a global KlusterletConfig and bind it with a managed cluster, the value of the global KlusterletConfig is used if the same field is missing or does not have a value in your KlusterletConfig . See the following examples, where the "example.global.com" value in the hubKubeAPIServerURL field of the global KlusterletConfig overrides your KlusterletConfig . apiVersion: config.open-cluster-management.io/v1alpha1 kind: KlusterletConfig metadata: name: test spec: hubKubeAPIServerURL: "" - apiVersion: config.open-cluster-management.io/v1alpha1 kind: KlusterletConfig metadata: name: global spec: hubKubeAPIServerURL: "example.global.com" apiVersion: config.open-cluster-management.io/v1alpha1 kind: KlusterletConfig metadata: name: test - apiVersion: config.open-cluster-management.io/v1alpha1 kind: KlusterletConfig metadata: name: global spec: hubKubeAPIServerURL: "example.global.com" 1.6.18.4. Additional resources Adding API server certificates Troubleshooting imported clusters offline after certificate change Configuring proxy settings for cluster proxy add-ons 1.6.19. Removing a cluster from management When you remove an OpenShift Container Platform cluster from management that was created with multicluster engine operator, you can either detach it or destroy it. Detaching a cluster removes it from management, but does not completely delete it. You can import it again if you want to manage it. This is only an option when the cluster is in a Ready state. The following procedures remove a cluster from management in either of the following situations: You already deleted the cluster and want to remove the deleted cluster from Red Hat Advanced Cluster Management. You want to remove the cluster from management, but have not deleted the cluster. Important: Destroying a cluster removes it from management and deletes the components of the cluster. When you detach or destroy a managed cluster, the related namespace is automatically deleted. Do not place custom resources in this namespace. Removing a cluster by using the console Removing a cluster by using the command line Removing remaining resources after removing a cluster Defragmenting the etcd database after removing a cluster 1.6.19.1. Removing a cluster by using the console From the navigation menu, navigate to Infrastructure > Clusters and select Destroy cluster or Detach cluster from the options menu beside the cluster that you want to remove from management. Tip: You can detach or destroy multiple clusters by selecting the check boxes of the clusters that you want to detach or destroy and selecting Detach or Destroy . Note: If you attempt to detach the hub cluster while it is managed, which is called a local-cluster , check to see if the default setting of disableHubSelfManagement is false . This setting causes the hub cluster to reimport itself and manage itself when it is detached, and it reconciles the MultiClusterHub controller. It might take hours for the hub cluster to complete the detachment process and reimport. To reimport the hub cluster without waiting for the processes to finish, you can enter the following command to restart the multiclusterhub-operator pod and reimport faster: You can change the value of the hub cluster to not import automatically by changing the disableHubSelfManagement value to true , as described in Installing while connected online . 1.6.19.2. Removing a cluster by using the command line To detach a managed cluster by using the command line of the hub cluster, run the following command: To destroy the managed cluster after detaching, run the following command: Notes: To prevent destroying the managed cluster, set the spec.preserveOnDelete parameter to true in the ClusterDeployment custom resource. The default setting of disableHubSelfManagement is false . The false`setting causes the hub cluster, also called `local-cluster , to reimport and manage itself when it is detached and it reconciles the MultiClusterHub controller. The detachment and reimport process might take hours might take hours for the hub cluster to complete. If you want to reimport the hub cluster without waiting for the processes to finish, you can enter the following command to restart the multiclusterhub-operator pod and reimport faster: You can change the value of the hub cluster to not import automatically by changing the disableHubSelfManagement value to true . See Installing while connected online . 1.6.19.3. Removing remaining resources after removing a cluster If there are remaining resources on the managed cluster that you removed, there are additional steps that are required to ensure that you remove all of the remaining components. Situations when these extra steps are required include the following examples: The managed cluster was detached before it was completely created, and components like the klusterlet remain on the managed cluster. The hub that was managing the cluster was lost or destroyed before detaching the managed cluster, and there is no way to detach the managed cluster from the hub. The managed cluster was not in an online state when it was detached. If one of these situations apply to your attempted detachment of a managed cluster, there are some resources that cannot be removed from managed cluster. Complete the following steps to detach the managed cluster: Make sure you have the oc command line interface configured. Make sure you have KUBECONFIG configured on your managed cluster. If you run oc get ns | grep open-cluster-management-agent , you should see two namespaces: Remove the klusterlet custom resource by using the following command: oc get klusterlet | grep klusterlet | awk '{print USD1}' | xargs oc patch klusterlet --type=merge -p '{"metadata":{"finalizers": []}}' Run the following command to remove the remaining resources: oc delete namespaces open-cluster-management-agent open-cluster-management-agent-addon --wait=false oc get crds | grep open-cluster-management.io | awk '{print USD1}' | xargs oc delete crds --wait=false oc get crds | grep open-cluster-management.io | awk '{print USD1}' | xargs oc patch crds --type=merge -p '{"metadata":{"finalizers": []}}' Run the following command to ensure that both namespaces and all open cluster management crds are removed: oc get crds | grep open-cluster-management.io | awk '{print USD1}' oc get ns | grep open-cluster-management-agent 1.6.19.4. Defragmenting the etcd database after removing a cluster Having many managed clusters can affect the size of the etcd database in the hub cluster. In OpenShift Container Platform 4.8, when you delete a managed cluster, the etcd database in the hub cluster is not automatically reduced in size. In some scenarios, the etcd database can run out of space. An error etcdserver: mvcc: database space exceeded is displayed. To correct this error, reduce the size of the etcd database by compacting the database history and defragmenting the etcd database. Note: For OpenShift Container Platform version 4.9 and later, the etcd Operator automatically defragments disks and compacts the etcd history. No manual intervention is needed. The following procedure is for OpenShift Container Platform version 4.8 and earlier. Compact the etcd history and defragment the etcd database in the hub cluster by completing the following procedure. 1.6.19.4.1. Prerequisites Install the OpenShift CLI ( oc ). Log in as a user with cluster-admin privileges. 1.6.19.4.2. Procedure Compact the etcd history. Open a remote shell session to the etcd member, for example: USD oc rsh -n openshift-etcd etcd-control-plane-0.example.com etcdctl endpoint status --cluster -w table Run the following command to compact the etcd history: sh-4.4#etcdctl compact USD(etcdctl endpoint status --write-out="json" | egrep -o '"revision":[0-9]*' | egrep -o '[0-9]*' -m1) Example output USD compacted revision 158774421 Defragment the etcd database and clear any NOSPACE alarms as outlined in Defragmenting etcd data . 1.7. Discovery service introduction You can discover OpenShift 4 clusters that are available from OpenShift Cluster Manager . After discovery, you can import your clusters to manage. The Discovery services uses the Discover Operator for back-end and console usage. You must have an OpenShift Cluster Manager credential. See Creating a credential for Red Hat OpenShift Cluster Manager if you need to create a credential. Required access : Administrator Configure Discovery with the console Configure Discovery using the CLI Enabling a discovered cluster for management 1.7.1. Configure Discovery with the console Configure Discovery in the console to find clusters. When you configure the Discovery feature on your cluster, you must enable a DiscoveryConfig resource to connect to the OpenShift Cluster Manager to begin discovering clusters that are a part of your organization. You can create multiple DiscoveryConfig resources with separate credentials. After you discover clusters, you can import clusters that appear in the Discovered clusters tab of the console. Use the product console to enable Discovery. Required access : Access to the namespace where the credential was created. 1.7.1.1. Prerequisites You need a credential. See Creating a credential for Red Hat OpenShift Cluster Manager to connect to OpenShift Cluster Manager. You need access to the namespaces that were used to configure Discovery. 1.7.1.2. Import discovered clusters from the console To manually import other infrastructure provider discovered clusters, complete the following steps: Go to the existing Clusters page and click the Discovered clusters tab. From the Discovered clusters table, find the cluster that you want to import. From the options menu, choose Import cluster . For discovered clusters, you can import manually using the documentation, or you can choose Import clusters automatically. To import automatically with your credentials or Kubeconfig file, copy and paste the content. Click Import . 1.7.1.3. View discovered clusters After you set up your credentials and discover your clusters for import, you can view them in the console. Click Clusters > Discovered clusters View the populated table with the following information: Name is the display name that is designated in OpenShift Cluster Manager. If the cluster does not have a display name, a generated name based on the cluster console URL is displayed. If the console URL is missing or was modified manually in OpenShift Cluster Manager, the cluster external ID is displayed. Namespace is the namespace where you created the credential and discovered clusters. Type is the discovered cluster Red Hat OpenShift type. Distribution version is the discovered cluster Red Hat OpenShift version. Infrastructure provider is the cloud provider of the discovered cluster. Last active is the last time the discovered cluster was active. Created when the discovered cluster was created. Discovered when the discovered cluster was discovered. You can search for any information in the table, as well. For example, to show only Discovered clusters in a particular namespace, search for that namespace. You can now click Import cluster to create managed clusters. 1.7.2. Enable Discovery using the CLI Enable discovery using the CLI to find clusters that are available from Red Hat OpenShift Cluster Manager. Required access : Administrator 1.7.2.1. Prerequisites Create a credential to connect to Red Hat OpenShift Cluster Manager. 1.7.2.2. Discovery set up and process Note: The DiscoveryConfig must be named discovery and must be created in the same namespace as the selected credential . See the following DiscoveryConfig sample: apiVersion: discovery.open-cluster-management.io/v1 kind: DiscoveryConfig metadata: name: discovery namespace: <NAMESPACE_NAME> spec: credential: <SECRET_NAME> filters: lastActive: 7 openshiftVersions: - "4.14" Replace SECRET_NAME with the credential that you previously set up. Replace NAMESPACE_NAME with the namespace of SECRET_NAME . Enter the maximum time since last activity of your clusters (in days) to discover. For example, with lastActive: 7 , clusters that active in the last 7 days are discovered. Enter the versions of Red Hat OpenShift clusters to discover as a list of strings. Note: Every entry in the openshiftVersions list specifies an OpenShift major and minor version. For example, specifying "4.11" will include all patch releases for the OpenShift version 4.11 , for example 4.11.1 , 4.11.2 . 1.7.2.3. View discovered clusters View discovered clusters by running oc get discoveredclusters -n <namespace> where namespace is the namespace where the discovery credential exists. 1.7.2.3.1. DiscoveredClusters Objects are created by the Discovery controller. These DiscoveredClusters represent the clusters that are found in OpenShift Cluster Manager by using the filters and credentials that are specified in the DiscoveryConfig discoveredclusters.discovery.open-cluster-management.io API. The value for name is the cluster external ID: apiVersion: discovery.open-cluster-management.io/v1 kind: DiscoveredCluster metadata: name: fd51aafa-95a8-41f7-a992-6fb95eed3c8e namespace: <NAMESPACE_NAME> spec: activity_timestamp: "2021-04-19T21:06:14Z" cloudProvider: vsphere console: https://console-openshift-console.apps.qe1-vmware-pkt.dev02.red-chesterfield.com creation_timestamp: "2021-04-19T16:29:53Z" credential: apiVersion: v1 kind: Secret name: <SECRET_NAME> namespace: <NAMESPACE_NAME> display_name: qe1-vmware-pkt.dev02.red-chesterfield.com name: fd51aafa-95a8-41f7-a992-6fb95eed3c8e openshiftVersion: 4.14 status: Stale 1.7.3. Enabling a discovered cluster for management Automatically import supported clusters into your hub cluster with the Discovery-Operator for faster cluster management, without manually importing individual clusters. Required access: Cluster administrator 1.7.3.1. Prerequisite Discovery is enabled by default. If you changed default settings, you need to enable Discovery. You must set up the OpenShift Service on AWS command line interface. See Getting started with the OpenShift Service on AWS CLI documentation. 1.7.3.2. Importing discovered OpenShift Service on AWS and hosted control plane clusters automatically The following procedure is an example of how to import your discovered OpenShift Service on AWS and hosted control planes clusters automatically by using the Discovery-Operator . 1.7.3.2.1. Importing from the console To automatically import the DiscoveredCluster resource, you must modify the resource and set the importAsManagedCluster field to true in the console. See the following procedure: Log in to your hub cluster from the console. Select Search from the navigation menu. From the search bar, enter the following query: "DiscoveredCluster". The DiscoveredCluster resource results appear. Go to the DiscoveredCluster resource and set importAsManagedCluster to true . See the following example, where importAsManagedCluster is set to true and <4.x.z> is your supported OpenShift Container Platform version: apiVersion: discovery.open-cluster-management.io/v1 kind: DiscoveredCluster metadata: name: 28c17977-fc73-4050-b5cc-a5aa2d1d6892 namespace: discovery spec: openshiftVersion: <4.x.z> isManagedCluster: false cloudProvider: aws name: 28c17977-fc73-4050-b5cc-a5aa2d1d6892 displayName: rosa-dc status: Active importAsManagedCluster: true 1 type: <supported-type> 2 1 By setting the field to true , the Discovery-Operator imports the DiscoveredCluster resource, creates a ManagedCluster resource and if the Red Hat Advanced Cluster Management is installed, creates the KlusterletAddOnConfig resource. It also creates the Secret resources for your automatic import. 2 You must use ROSA or MultiClusterEngineHCP as the parameter value. To verify that the DiscoveredCluster resource is imported, go to the Clusters page. Check the import status of your cluster from the Cluster list tab. If you want to detach managed clusters for Discovery to prevent automatic reimport, select the Detach cluster option. The Discovery-Operator adds the following annotation, discovery.open-cluster-management.io/previously-auto-imported: 'true' . Your DiscoveredCluster resource might resemble the following YAML: apiVersion: discovery.open-cluster-management.io/v1 kind: DiscoveredCluster metadata: annotations: discovery.open-cluster-management.io/previously-auto-imported: 'true' To verify that the DiscoveredCluster resource is not reimported automatically, check for the following message in the Discovery-Operator logs, where "rosa-dc" is this discovered cluster: 2024-06-12T14:11:43.366Z INFO reconcile Skipped automatic import for DiscoveredCluster due to existing 'discovery.open-cluster-management.io/previously-auto-imported' annotation {"Name": "rosa-dc"} If you want to reimport the DiscoveredCluster resource automatically, you must remove the previously mentioned annotation. 1.7.3.2.2. Importing from the command line interface To automatically import the DiscoveredCluster resource from the command line complete the following steps: To automatically import the DiscoveredCluster resource, set the importAsManagedCluster paramater to true by using the following command after you log in. Replace <name> and <namespace> with your name and namespace: oc patch discoveredcluster <name> -n <namespace> --type='json' -p='[{"op": "replace", "path": "/spec/importAsManagedCluster", "value": true}]' Run the following command to verify that the cluster was imported as a managed cluster: oc get managedcluster <name> To get a description of your OpenShift Service on AWS cluster ID, run the following command from the OpenShift Service on AWS command line interface: rosa describe cluster --cluster=<cluster-name> | grep -o '^ID:.* For other Kubernetes providers, you must import these infrastructure provider DiscoveredCluster resources manually. Directly apply Kubernetes configurations to the other types of DiscoveredCluster resources. If you enable the importAsManagedCluster field from the DiscoveredCluster resource, it is not imported due to the Discovery webhook. 1.7.3.3. Additional resources See Discovery service introduction . 1.8. Hosted control planes With multicluster engine operator cluster management, you can deploy OpenShift Container Platform clusters by using two different control plane configurations: standalone or hosted control planes. The standalone configuration uses dedicated virtual machines or physical machines to host the OpenShift Container Platform control plane. With hosted control planes for OpenShift Container Platform, you create control planes as pods on a hosting cluster without the need for dedicated physical machines for each control plane. Hosted control planes for OpenShift Container Platform are available on Amazon Web Services (AWS), IBM Power, IBM Z, Red Hat OpenShift Virtualization, bare metal, and non bare metal agent machines. The hosted control planes feature is enabled by default. Hosted control plane clusters offer several advantages: Saves cost by removing the need to host dedicated control plane nodes Introduces separation between the control plane and the workloads, which improves isolation and reduces configuration errors that can require changes Decreases the cluster creation time by removing the requirement for control plane node bootstrapping Supports turn-key deployments or fully customized OpenShift Container Platform provisioning 1.8.1. Requirements The hosting cluster and workers must run on the same infrastructure. For example, you cannot run your hosting cluster on bare metal and your workers on the cloud. However, the hub cluster and workers do not need to run on the same platform. For example, you might run your hosting cluster on bare metal and workers on OpenShift Virtualization. The control plane is associated with a hosted cluster and runs as pods in a single namespace. When the cluster service consumer creates a hosted cluster, it creates a worker node that is independent of the control plane. If you are using a proxy and you want traffic from pods to the Kubernetes API server to not use the proxy, add the default Kubernetes API server address, 172.20.0.1, to the no_proxy list. The following table indicates which OpenShift Container Platform versions are supported for each platform. In the table, Hosting OpenShift Container Platform version refers to the OpenShift Container Platform version where multicluster engine operator is enabled: Platform Hosting OpenShift Container Platform version Hosted OpenShift Container Platform version AWS 4.11 - 4.16 4.14 - 4.16 (only) IBM Power 4.16 4.16 (only) IBM Z 4.16 4.16 (only) Red Hat OpenShift Virtualization 4.14 - 4.16 4.14 - 4.16 (only) Bare metal 4.14 - 4.16 4.14 - 4.16 (only) Non bare metal agent machines 4.16 4.16 (only) 1.8.1.1. Additional resources Configuring additional networks, guaranteed CPUs, and VM scheduling for node pools Architecture of hosted control planes Glossary of common concepts and personas for hosted control planes Creating monitoring dashboards for hosted clusters Backup, restore, and disaster recovery for hosted control planes Configuring hosted control plane clusters using non bare metal agent machines (Technology Preview) 1.8.2. Hosted control plane sizing guidance Many factors, including hosted cluster workload and worker node count, affect how many hosted clusters can fit within a certain number of control-plane nodes. Use this sizing guide to help with hosted cluster capacity planning. This guidance assumes a highly available hosted control plane topology. The load-based sizing examples were measured on a bare-metal cluster. Cloud-based instances might have different limiting factors, such as memory size. For more information about highly available hosted control plane topology, see Distributing hosted cluster workloads . You can override the following resource utilization sizing measurements and disable the metric service monitoring. For more information, see Overriding resource utilization measurements in the Additional resources section. See the following highly available hosted control plane requirements, which were tested with OpenShift Container Platform version 4.12.9 and later: 78 pods Three 8 GiB PVs for etcd Minimum vCPU: approximately 5.5 cores Minimum memory: approximately 19 GiB 1.8.2.1. Pod limits The maxPods setting for each node affects how many hosted clusters can fit in a control-plane node. It is important to note the maxPods value on all control-plane nodes. Plan for about 75 pods for each highly available hosted control plane. For bare-metal nodes, the default maxPods setting of 250 is likely to be a limiting factor because roughly three hosted control planes fit for each node given the pod requirements, even if the machine has plenty of resources to spare. Setting the maxPods value to 500 by configuring the KubeletConfig value allows for greater hosted control plane density, which can help you take advantage of additional compute resources. For more information, see Configuring the maximum number of pods per node in the OpenShift Container Platform documentation. 1.8.2.2. Request-based resource limit The maximum number of hosted control planes that the cluster can host is calculated based on the hosted control plane CPU and memory requests from the pods. A highly available hosted control plane consists of 78 pods that request 5 vCPUs and 18 GB memory. These baseline numbers are compared to the cluster worker node resource capacities to estimate the maximum number of hosted control planes. 1.8.2.3. Load-based limit The maximum number of hosted control planes that the cluster can host is calculated based on the hosted control plane pods CPU and memory utilizations when some workload is put on the hosted control plane Kubernetes API server. The following method is used to measure the hosted control plane resource utilizations as the workload increases: A hosted cluster with 9 workers that use 8 vCPU and 32 GiB each, while using the KubeVirt platform The workload test profile that is configured to focus on API control-plane stress, based on the following definition: Created objects for each namespace, scaling up to 100 namespaces total Additional API stress with continuous object deletion and creation Workload queries-per-second (QPS) and Burst settings set high to remove any client-side throttling As the load increases by 1000 QPS, the hosted control plane resource utilization increases by 9 vCPUs and 2.5 GB memory. For general sizing purposes, consider the 1000 QPS API rate that is a medium hosted cluster load, and a 2000 QPS API that is a heavy hosted cluster load. Note: This test provides an estimation factor to increase the compute resource utilization based on the expected API load. Exact utilization rates can vary based on the type and pace of the cluster workload. Table 1.8. Load table Hosted control plane resource utilization scaling vCPUs Memory (GiB) Resource utilization with no load 2.9 11.1 Resource utilization with 1000 QPS 9.0 2.5 As the load increases by 1000 QPS, the hosted control plane resource utilization increases by 9 vCPUs and 2.5 GB memory. For general sizing purposes, consider a 1000 QPS API rate to be a medium hosted cluster load and a 2000 QPS API to be a heavy hosted cluster load. The following example shows hosted control plane resource scaling for the workload and API rate definitions: Table 1.9. API rate table QPS (API rate) vCPU usage Memory usage (GiB) Low load (Less than 50 QPS) 2.9 11.1 Medium load (1000 QPS) 11.9 13.6 High load (2000 QPS) 20.9 16.1 The hosted control plane sizing is about control-plane load and workloads that cause heavy API activity, etcd activity, or both. Hosted pod workloads that focus on data-plane loads, such as running a database, might not result in high API rates. 1.8.2.4. Sizing calculation example This example provides sizing guidance for the following scenario: Three bare-metal workers that are labeled as hypershift.openshift.io/control-plane nodes maxPods value set to 500 The expected API rate is medium or about 1000, according to the load-based limits Table 1.10. Limit inputs Limit description Server 1 Server 2 Number of vCPUs on worker node 64 128 Memory on worker node (GiB) 128 256 Maximum pods per worker 500 500 Number of workers used to host control planes 3 3 Maximum QPS target rate (API requests per second) 1000 1000 Table 1.11. Sizing calculation example Calculated values based on worker node size and API rate Server 1 Server 2 Calculation notes Maximum hosted control planes per worker based on vCPU requests 12.8 25.6 Number of worker vCPUs / 5 total vCPU requests per hosted control plane Maximum hosted control planes per worker based on vCPU usage 5.4 10.7 Number of vCPUS / (2.9 measured idle vCPU usage + (QPS target rate / 1000) x 9.0 measured vCPU usage per 1000 QPS increase) Maximum hosted control planes per worker based on memory requests 7.1 14.2 Worker memory GiB / 18 GiB total memory request per hosted control plane Maximum hosted control planes per worker based on memory usage 9.4 18.8 Worker memory GiB / (11.1 measured idle memory usage + (QPS target rate / 1000) x 2.5 measured memory usage per 1000 QPS increase) Maximum hosted control planes per worker based on per node pod limit 6.7 6.7 500 maxPods / 75 pods per hosted control plane Minimum of previously mentioned maximums 5.4 6.7 vCPU limiting factor maxPods limiting factor Maximum number of hosted control planes within a management cluster 16 20 Minimum of previously mentioned maximums x 3 control-plane workers Table 1.12. Hosted control plane capacity metrics Name Description mce_hs_addon_request_based_hcp_capacity_gauge Estimated maximum number of hosted control planes the cluster can host based on a highly available hosted control plane resource request. mce_hs_addon_low_qps_based_hcp_capacity_gauge Estimated maximum number of hosted control planes the cluster can host if all hosted control planes make around 50 QPS to the clusters Kube API server. mce_hs_addon_medium_qps_based_hcp_capacity_gauge Estimated maximum number of hosted control planes the cluster can host if all hosted control planes make around 1000 QPS to the clusters Kube API server. mce_hs_addon_high_qps_based_hcp_capacity_gauge Estimated maximum number of hosted control planes the cluster can host if all hosted control planes make around 2000 QPS to the clusters Kube API server. mce_hs_addon_average_qps_based_hcp_capacity_gauge Estimated maximum number of hosted control planes the cluster can host based on the existing average QPS of hosted control planes. If you do not have an active hosted control plane, you can expect low QPS. 1.8.2.5. Additional resources Distribute hosted cluster workloads Configuring the maximum number of pods per node Overriding resource utilization measurements 1.8.3. Overriding resource utilization measurements The set of baseline measurements for resource utilization can vary in every cluster. Fore more information, see Hosted control plane sizing guidance . You can override the resource utilization measurements based on the type and pace of your cluster workload. Complete the following steps: Create the ConfigMap resource by running the following command: Replace <your-config-map-file.yaml> with the name of your YAML file that contains your hcp-sizing-baseline config map. Create the hcp-sizing-baseline config map in the local-cluster namespace to specify the measurements you want to override. Your config map might resemble the following YAML file: kind: ConfigMap apiVersion: v1 metadata: name: hcp-sizing-baseline namespace: local-cluster data: incrementalCPUUsagePer1KQPS: "9.0" memoryRequestPerHCP: "18" minimumQPSPerHCP: "50.0" Delete the hypershift-addon-agent deployment to restart the hypershift-addon-agent pod by running the following command: Observe the hypershift-addon-agent pod logs. Verify that the overridden measurements are updated in the config map by running the following command: Your logs might resemble the following output: If the overridden measurements are not updated properly in the hcp-sizing-baseline config map, you might see the following error message in the hypershift-addon-agent pod logs: 1.8.3.1. Disabling the metric service monitoring After enabling the hypershift-addon managed cluster addon, the metric service monitoring is configured by default so that OpenShift Container Platform monitoring can gather metrics from hypershift-addon . You can disable the metric service monitoring by completing the following steps: Log in to your hub cluster by running the following command: Open the hypershift-addon-deploy-config add-on deployment configuration specification for editing by running the following command: Add the disableMetrics=true customized variable to the specification, as shown in the following example: apiVersion: addon.open-cluster-management.io/v1alpha1 kind: AddOnDeploymentConfig metadata: name: hypershift-addon-deploy-config namespace: multicluster-engine spec: customizedVariables: - name: hcMaxNumber value: "80" - name: hcThresholdNumber value: "60" - name: disableMetrics value: "true" Save the changes. The disableMetrics=true customized variable disables the metric service monitoring configuration for both new and existing hypershift-addon managed cluster add-ons. 1.8.3.2. Additional resources Hosted control plane sizing guidance 1.8.4. Installing the hosted control plane command line interface The hosted control plane command line interface, hcp , is a tool that you can use to get started with hosted control planes. For Day 2 operations, such as management and configuration, use GitOps or your own automation tool. You can install the hosted control plane command line interface, hcp , by completing the following steps: From the OpenShift Container Platform console, click the Help icon > Command Line Tools . Click Download hcp CLI for your platform. Unpack the downloaded archive by running the following command: Run the following command to make the binary file executable: Run the following command to move the binary file to a directory in your path: You can now use the hcp create cluster command to create and manage hosted clusters. To list the available parameters, enter the following command: 1 The supported platforms are aws , agent , and kubevirt . 1.8.4.1. Installing the hosted control plane command line interface by using the CLI You can install the hosted control plane command line interface, hcp , by using the CLI, by completing the following steps: Get the URL to download the hcp binary by running the following command: Download the hcp binary by running the following command: 1 Replace hcp_cli_download_url with the URL that you obtained from the step. Unpack the downloaded archive by running the following command: Run the following command to make the binary file executable: Run the following command to move the binary file to a directory in your path: 1.8.4.2. Installing the hosted control plane command line interface by using the content gateway You can install the hosted control plane command line interface, hcp , by using the content gateway. Complete the following steps: Navigate to the content gateway and download the hcp binary. Unpack the downloaded archive by running the following command: Run the following command to make the binary file executable: Run the following command to move the binary file to a directory in your path: You can now use the hcp create cluster command to create and manage hosted clusters. To list the available parameters, enter the following command: 1 The supported platforms are aws , agent , and kubevirt . 1.8.5. Distributing hosted cluster workloads Before you get started with hosted control planes for OpenShift Container Platform, you must properly label nodes so that the pods of hosted clusters can be scheduled into infrastructure nodes. Node labeling is also important for the following reasons: To ensure high availability and proper workload deployment. For example, you can set the node-role.kubernetes.io/infra label to avoid having the control-plane workload count toward your OpenShift Container Platform subscription. To ensure that control plane workloads are separate from other workloads in the management cluster. Important: Do not use the management cluster for your workload. Workloads must not run on nodes where control planes run. 1.8.5.1. Labels and taints for management cluster nodes As a management cluster administrator, use the following labels and taints in management cluster nodes to schedule a control plane workload: hypershift.openshift.io/control-plane: true : Use this label and taint to dedicate a node to running hosted control plane workloads. By setting a value of true , you avoid sharing the control plane nodes with other components, for example, the infrastructure components of the management cluster or any other mistakenly deployed workload. hypershift.openshift.io/cluster: USD{HostedControlPlane Namespace} : Use this label and taint when you want to dedicate a node to a single hosted cluster. Apply the following labels on the nodes that host control-plane pods: node-role.kubernetes.io/infra : Use this label to avoid having the control-plane workload count toward your subscription. topology.kubernetes.io/zone : Use this label on the management cluster nodes to deploy highly available clusters across failure domains. The zone might be a location, rack name, or the hostname of the node where the zone is set. For example, a management cluster has the following nodes: worker-1a , worker-1b , worker-2a , and worker-2b . The worker-1a and worker-1b nodes are in rack1 , and the worker-2a and worker-2b nodes are in rack2 . To use each rack as an availability zone, enter the following commands: Pods for a hosted cluster have tolerations, and the scheduler uses affinity rules to schedule them. Pods tolerate taints for control-plane and the cluster for the pods. The scheduler prioritizes the scheduling of pods into nodes that are labeled with hypershift.openshift.io/control-plane and hypershift.openshift.io/cluster: USD{HostedControlPlane Namespace} . For the ControllerAvailabilityPolicy option, use HighlyAvailable , which is the default value that the hosted control planes command line interface, hcp , deploys. When you use that option, you can schedule pods for each deployment within a hosted cluster across different failure domains by setting topology.kubernetes.io/zone as the topology key. Control planes that are not highly available are not supported. Important: Proper node labeling, which is detailed in the following procedure, is a prerequisite to deploying hosted control planes. 1.8.5.2. Labeling nodes for hosted clusters To enable a hosted cluster to require its pods to be scheduled into infrastructure nodes, set HostedCluster.spec.nodeSelector , as shown in the following example: spec: nodeSelector: role.kubernetes.io/infra: "" This way, hosted control planes for each hosted cluster are eligible infrastructure node workloads, and you do not need to entitle the underlying OpenShift Container Platform nodes. 1.8.5.3. Priority classes Four built-in priority classes influence the priority and preemption of the hosted cluster pods. You can create the pods in the management cluster in the following order from highest to lowest: hypershift-operator : HyperShift Operator pods. hypershift-etcd : Pods for etcd. hypershift-api-critical : Pods that are required for API calls and resource admission to succeed. These pods include pods such as kube-apiserver , aggregated API servers, and web hooks. hypershift-control-plane : Pods in the control plane that are not API-critical but still need elevated priority, such as the cluster version Operator. 1.8.5.4. Additional resources For more information about hosted control planes, see the following topics: Configuring hosted control plane clusters on bare metal Managing hosted control plane clusters on OpenShift Virtualization Configuring hosted control plane clusters on AWS 1.8.6. Configuring hosted control plane clusters on AWS To configure hosted control planes, you need a hosting cluster and a hosted cluster. By deploying the HyperShift Operator on an existing managed cluster by using the hypershift-addon managed cluster add-on, you can enable that cluster as a hosting cluster and start to create the hosted cluster. The hypershift-addon managed cluster add-on is enabled by default for the local-cluster managed cluster. A hosted cluster is an OpenShift Container Platform cluster with its API endpoint and control plane that are hosted on the hosting cluster. The hosted cluster includes the control plane and its corresponding data plane. You can use the multicluster engine operator console or the hosted control plane command line interface, hcp , to create a hosted cluster. The hosted cluster is automatically imported as a managed cluster. If you want to disable this automatic import feature, see Disabling the automatic import of hosted clusters into multicluster engine operator . Important: Each hosted cluster must have a cluster-wide unique name. A hosted cluster name cannot be the same as any existing managed cluster in order for multicluster engine operator to manage it. Do not use clusters as a hosted cluster name. Run the hub cluster and workers on the same platform for hosted control planes. A hosted cluster cannot be created in the namespace of a multicluster engine operator managed cluster. 1.8.6.1. Prerequisites You must have the following prerequisites to configure a hosting cluster: The multicluster engine for Kubernetes operator 2.5 and later installed on an OpenShift Container Platform cluster. The multicluster engine operator is automatically installed when you install Red Hat Advanced Cluster Management. The multicluster engine operator can also be installed without Red Hat Advanced Cluster Management as an Operator from the OpenShift Container Platform OperatorHub. The multicluster engine operator must have at least one managed OpenShift Container Platform cluster. The local-cluster is automatically imported in multicluster engine operator 2.5 and later. See Advanced configuration for more information about the local-cluster . You can check the status of your hub cluster by running the following command: The AWS command line interface The hosted control plane command line interface For additional resources about hosted control planes, see the following documentation: To disable the hosted control planes feature or, if you already disabled it and want to manually enable it, see Enabling or disabling the hosted control planes feature . To manage hosted clusters by running Red Hat Ansible Automation Platform jobs, see Configuring Ansible Automation Platform jobs to run on hosted clusters . To deploy the SR-IOV Operator, see Deploying the SR-IOV Operator for hosted control planes . 1.8.6.2. Creating the Amazon Web Services S3 bucket and S3 OIDC secret If you plan to create and manage hosted clusters on AWS, complete the following steps: Create an S3 bucket that has public access to host OIDC discovery documents for your clusters. To create the bucket in the us-east-1 region, enter the following code: To create the bucket in a region other than the us-east-1 region, enter the following code: Create an OIDC S3 secret named hypershift-operator-oidc-provider-s3-credentials for the HyperShift operator. Save the secret in the local-cluster namespace. See the following table to verify that the secret contains the following fields: Field name Description bucket Contains an S3 bucket with public access to host OIDC discovery documents for your hosted clusters. credentials A reference to a file that contains the credentials of the default profile that can access the bucket. By default, HyperShift only uses the default profile to operate the bucket . region Specifies the region of the S3 bucket. The following example shows a sample AWS secret template: Note: Disaster recovery backup for the secret is not automatically enabled. Run the following command to add the label that enables the hypershift-operator-oidc-provider-s3-credentials secret to be backed up for disaster recovery: 1.8.6.3. Creating a routable public zone To access applications in your guest clusters, the public zone must be routable. If the public zone exists, skip this step. Otherwise, the public zone will affect the existing functions. Run the following command to create a public zone for cluster DNS records: Replace your-basedomain with your base domain, for example, www.example.com . 1.8.6.4. Creating an AWS IAM role and STS credentials Before creating hosted clusters on Amazon Web Services (AWS), you must create AWS Security Token Service (STS) credentials and an AWS Identity and Access Management (IAM) role for the hcp command line interface (CLI). Get the Amazon Resource Name (ARN) of your user by running the following command: aws sts get-caller-identity --query "Arn" --output text See the following example output: You need the arn value from the output to use in the step. Create a JSON file named trust-relationship.json that contains the trust relationship configuration for your role. Replace <arn> with the ARN of your user that you noted in the step. See the following example: { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Principal": { "AWS": "<arn>" }, "Action": "sts:AssumeRole" } ] } Create the Identity and Access Management (IAM) role by running the following command. Replace <name> with the role name, which is hcp-cli-role in this example. Replace <file_name> with the file name, which is trust-relationship.json in this command: aws iam create-role --role-name <hcp-cli-role> --assume-role-policy-document file://trust-relationship.json --query "Role.Arn" --output json See the following example output: Create a JSON file named policy.json that contains the following permission policies for your role: { "Version": "2012-10-17", "Statement": [ { "Sid": "EC2", "Effect": "Allow", "Action": [ "ec2:CreateDhcpOptions", "ec2:DeleteSubnet", "ec2:ReplaceRouteTableAssociation", "ec2:DescribeAddresses", "ec2:DescribeInstances", "ec2:DeleteVpcEndpoints", "ec2:CreateNatGateway", "ec2:CreateVpc", "ec2:DescribeDhcpOptions", "ec2:AttachInternetGateway", "ec2:DeleteVpcEndpointServiceConfigurations", "ec2:DeleteRouteTable", "ec2:AssociateRouteTable", "ec2:DescribeInternetGateways", "ec2:DescribeAvailabilityZones", "ec2:CreateRoute", "ec2:CreateInternetGateway", "ec2:RevokeSecurityGroupEgress", "ec2:ModifyVpcAttribute", "ec2:DeleteInternetGateway", "ec2:DescribeVpcEndpointConnections", "ec2:RejectVpcEndpointConnections", "ec2:DescribeRouteTables", "ec2:ReleaseAddress", "ec2:AssociateDhcpOptions", "ec2:TerminateInstances", "ec2:CreateTags", "ec2:DeleteRoute", "ec2:CreateRouteTable", "ec2:DetachInternetGateway", "ec2:DescribeVpcEndpointServiceConfigurations", "ec2:DescribeNatGateways", "ec2:DisassociateRouteTable", "ec2:AllocateAddress", "ec2:DescribeSecurityGroups", "ec2:RevokeSecurityGroupIngress", "ec2:CreateVpcEndpoint", "ec2:DescribeVpcs", "ec2:DeleteSecurityGroup", "ec2:DeleteDhcpOptions", "ec2:DeleteNatGateway", "ec2:DescribeVpcEndpoints", "ec2:DeleteVpc", "ec2:CreateSubnet", "ec2:DescribeSubnets" ], "Resource": "*" }, { "Sid": "ELB", "Effect": "Allow", "Action": [ "elasticloadbalancing:DeleteLoadBalancer", "elasticloadbalancing:DescribeLoadBalancers", "elasticloadbalancing:DescribeTargetGroups", "elasticloadbalancing:DeleteTargetGroup" ], "Resource": "*" }, { "Sid": "IAMPassRole", "Effect": "Allow", "Action": "iam:PassRole", "Resource": "arn:*:iam::*:role/*-worker-role", "Condition": { "ForAnyValue:StringEqualsIfExists": { "iam:PassedToService": "ec2.amazonaws.com" } } }, { "Sid": "IAM", "Effect": "Allow", "Action": [ "iam:CreateInstanceProfile", "iam:DeleteInstanceProfile", "iam:GetRole", "iam:UpdateAssumeRolePolicy", "iam:GetInstanceProfile", "iam:TagRole", "iam:RemoveRoleFromInstanceProfile", "iam:CreateRole", "iam:DeleteRole", "iam:PutRolePolicy", "iam:AddRoleToInstanceProfile", "iam:CreateOpenIDConnectProvider", "iam:TagOpenIDConnectProvider", "iam:ListOpenIDConnectProviders", "iam:DeleteRolePolicy", "iam:UpdateRole", "iam:DeleteOpenIDConnectProvider", "iam:GetRolePolicy" ], "Resource": "*" }, { "Sid": "Route53", "Effect": "Allow", "Action": [ "route53:ListHostedZonesByVPC", "route53:CreateHostedZone", "route53:ListHostedZones", "route53:ChangeResourceRecordSets", "route53:ListResourceRecordSets", "route53:DeleteHostedZone", "route53:AssociateVPCWithHostedZone", "route53:ListHostedZonesByName" ], "Resource": "*" }, { "Sid": "S3", "Effect": "Allow", "Action": [ "s3:ListAllMyBuckets", "s3:ListBucket", "s3:DeleteObject", "s3:DeleteBucket" ], "Resource": "*" } ] } Attach the policy.json file to your role by running the following command: aws iam put-role-policy \ --role-name <role_name> \ 1 --policy-name <policy_name> \ 2 --policy-document file://policy.json 3 1 Replace <role_name> with the name of your role. 2 Replace <policy_name> with your policy name. 3 The policy.json file contains the permission policies for your role. Retrieve STS credentials in a JSON file named sts-creds.json by running the following command: aws sts get-session-token --output json > sts-creds.json See the following example content of the sts-creds.json file : 1.8.6.5. Enabling external DNS The control plane and the data plane are separate in hosted control planes. You can configure DNS in two independent areas: Ingress for workloads within the hosted cluster, such as the following domain: *.apps.service-consumer-domain.com Ingress for service endpoints within the management cluster, such as API or OAUTH endpoints through the service provider domain: *.service-provider-domain.com The input for the hostedCluster.spec.dns manages the ingress for workloads within the hosted cluster. The input for hostedCluster.spec.services.servicePublishingStrategy.route.hostname manages the ingress for service endpoints within the management cluster. External DNS creates name records for hosted cluster Services that specify a publishing type of LoadBalancer or Route and provide a hostname for that publishing type. For hosted clusters with Private or PublicAndPrivate endpoint access types, only the APIServer and OAuth services support hostnames. For Private hosted clusters, the DNS record resolves to a private IP address of a Virtual Private Cloud (VPC) endpoint in your VPC. A hosted control plane exposes the following services: APIServer OAuthServer Konnectivity Ignition OVNSbDb OIDC You can expose these services by using the servicePublishingStrategy field in the HostedCluster specification. By default, for the LoadBalancer and Route types of servicePublishingStrategy , you can publish the service in one of the following ways: By using the hostname of the load balancer that is in the status of the Service with the LoadBalancer type By using the status.host field of the Route resource However, when you deploy hosted control planes in a managed service context, those methods can expose the ingress subdomain of the underlying management cluster and limit options for the management cluster lifecycle and disaster recovery. When a DNS indirection is layered on the LoadBalancer and Route publishing types, a managed service operator can publish all public hosted cluster services by using a service-level domain. This architecture allows remapping on the DNS name to a new LoadBalancer or Route and does not expose the ingress domain of the management cluster. Hosted control planes uses external DNS to achieve that indirection layer. You can deploy external-dns alongside the HyperShift Operator in the hypershift namespace of the management cluster. External DNS watches for Services or Routes that have the external-dns.alpha.kubernetes.io/hostname annotation. That annotation is used to create a DNS record that points to the Service , such as a record, or the Route , such as a CNAME record. You can use external DNS on cloud environments only. For the other environments, you need to manually configure DNS and services. For more information about external DNS, see external DNS . 1.8.6.5.1. Prerequisites Before you can set up external DNS for hosted control planes, you must meet the following prerequisites: You created an external public domain You have access to the AWS Route53 Management console 1.8.6.5.2. Setting up external DNS for hosted control planes To provision hosted control plane clusters with service-level DNS (external DNS), complete the following steps: Create an AWS credential secret for the HyperShift Operator and name it hypershift-operator-external-dns-credentials in the local-cluster namespace. See the following table to verify that the secret has the required fields: Field name Description Optional or required provider The DNS provider that manages the service-level DNS zone. Required domain-filter The service-level domain. Required credentials The credential file that supports all external DNS types. Optional when you use AWS keys aws-access-key-id The credential access key id. Optional when you use the AWS DNS service aws-secret-access-key The credential access key secret. Optional when you use the AWS DNS service The following example shows the sample hypershift-operator-external-dns-credentials secret template: oc create secret generic hypershift-operator-external-dns-credentials --from-literal=provider=aws --from-literal=domain-filter=<domain_name> --from-file=credentials=<path_to_aws_credentials_file> -n local-cluster Note: Disaster recovery backup for the secret is not automatically enabled. To back up the secret for disaster recovery, add the hypershift-operator-external-dns-credentials by entering the following command: oc label secret hypershift-operator-external-dns-credentials -n local-cluster cluster.open-cluster-management.io/backup="" 1.8.6.5.3. Creating the public DNS hosted zone The External DNS Operator uses the public DNS hosted zone to create your public hosted cluster. You can create the public DNS hosted zone to use as the external DNS domain-filter. Complete the following steps in the AWS Route 53 management console: In the Route 53 management console, click Create hosted zone . On the Hosted zone configuration page, type a domain name, verify that Publish hosted zone is selected as the type, and click Create hosted zone . After the zone is created, on the Records tab, note the values in the Value/Route traffic to column. In the main domain, create an NS record to redirect the DNS requests to the delegated zone. In the Value field, enter the values that you noted in the step. Click Create records . Verify that the DNS hosted zone is working by creating a test entry in the new subzone and testing it with a dig command like the following example: To create a hosted cluster that sets the hostname for LoadBalancer and Route services, enter the following command: Replace <public_hosted_zone> with the public hosted zone that you created. This example shows the resulting services block for the hosted cluster: platform: aws: endpointAccess: PublicAndPrivate ... services: - service: APIServer servicePublishingStrategy: route: hostname: api-example.service-provider-domain.com type: Route - service: OAuthServer servicePublishingStrategy: route: hostname: oauth-example.service-provider-domain.com type: Route - service: Konnectivity servicePublishingStrategy: type: Route - service: Ignition servicePublishingStrategy: type: Route The Control Plane Operator creates the Services and Routes resources and annotates them with the external-dns.alpha.kubernetes.io/hostname annotation. For Services and Routes , the Control Plane Operator uses a value of the hostname parameter in the servicePublishingStrategy field for the service endpoints. To create the DNS records, you can use a mechanism, such as the external-dns deployment. You can configure service-level DNS indirection for public services only. You cannot set hostname for private services because they use the hypershift.local private zone. The following table notes when it is valid to set hostname for a service and endpoint combination: Service Public PublicAndPrivate Private APIServer Y Y N OAuthServer Y Y N Konnectivity Y N N Ignition Y N N 1.8.6.5.4. Deploying a cluster by using the command line interface and external DNS To create a hosted cluster by using the PublicAndPrivate or Public publishing strategy, you must have the following artifacts configured in your management cluster: The public DNS hosted zone The External DNS Operator The HyperShift Operator To deploy a hosted cluster by using the command line interface, complete the following steps: To access your management cluster, enter the following command: export KUBECONFIG=<path_to_management_cluster_kubeconfig> Verify that the External DNS Operator is running by entering the following command: oc get pod -n hypershift -lapp=external-dns See the following example output: To create a hosted cluster by using external DNS, enter the following command: hcp create cluster aws \ --role-arn <arn_role> \ 1 --instance-type <instance_type> \ 2 --region <region> \ 3 --auto-repair \ --generate-ssh \ --name <hosted_cluster_name> \ 4 --namespace clusters \ --base-domain <service_consumer_domain> \ 5 --node-pool-replicas <node_replica_count> \ 6 --pull-secret <path_to_your_pull_secret> \ 7 --release-image quay.io/openshift-release-dev/ocp-release:<ocp_release_image> \ 8 --external-dns-domain=<service_provider_domain> \ 9 --endpoint-access=PublicAndPrivate 10 --sts-creds <path_to_sts_credential_file> 11 1 Specify the Amazon Resource Name (ARN), for example, arn:aws:iam::820196288204:role/myrole . 2 Specify the instance type, for example, m6i.xlarge . 3 Specify the AWS region, for example, us-east-1 . 4 Specify your hosted cluster name, for example, my-external-aws . 5 Specify the public hosted zone that the service consumer owns, for example, service-consumer-domain.com . 6 Specify the node replica count, for example, 2 . 7 Specify the path to your pull secret file. 8 Specify the supported OpenShift Container Platform version that you want to use, for example, 4.14.0-x86_64 . 9 Specify the public hosted zone that the service provider owns, for example, service-provider-domain.com . 10 Set as PublicAndPrivate . You can use external DNS with Public or PublicAndPrivate configurations only. 11 Specify the path to your AWS STS credentials file, for example, /home/user/sts-creds/sts-creds.json . 1.8.6.6. Enabling AWS PrivateLink If you plan to provision hosted control plane clusters on the AWS platform with PrivateLink, complete the following steps: Create an AWS credential secret for the HyperShift Operator and name it hypershift-operator-private-link-credentials . The secret must reside in the managed cluster namespace that is the namespace of the managed cluster being used as the hosting cluster. If you used local-cluster , create the secret in the local-cluster namespace. See the following table to confirm that the secret contains the required fields: Field name Description Optional or required region Region for use with Private Link Required aws-access-key-id The credential access key id. Required aws-secret-access-key The credential access key secret. Required The following example shows the sample hypershift-operator-private-link-credentials secret template: Note: Disaster recovery backup for the secret is not automatically enabled. Run the following command to add the label that enables the hypershift-operator-private-link-credentials secret to be backed up for disaster recovery: 1.8.6.7. Disaster recovery for a hosted cluster The hosted control plane runs on the multicluster engine operator hub cluster. The data plane runs on a separate platform that you choose. When recovering the multicluster engine operator hub cluster from a disaster, you might also want to recover the hosted control planes. See Disaster recovery for a hosted cluster within an AWS region to learn how to back up a hosted control plane cluster and restore it on a different cluster. Important: Disaster recovery for hosted clusters is available on AWS only. 1.8.6.8. Deploying a hosted cluster on AWS Before you deploy a hosted cluster on Amazon Web Services (AWS), you must perform the following actions: Set up the hosted control plane command line interface, hcp . Enable the local-cluster managed cluster as the hosting cluster. Create an AWS Identity and Access Management (IAM) role and AWS Security Token Service (STS) credentials. See Creating an AWS IAM role and STS credentials . If you want to deploy a private hosted cluster, see Deploying a private hosted cluster on AWS . To deploy a hosted cluster on AWS, complete the following steps: To create the hosted cluster, run the following command: hcp create cluster aws \ --name <hosted_cluster_name> \ 1 --infra-id <infra_id> \ 2 --base-domain <basedomain> \ 3 --sts-creds <path_to_sts_credential_file> \ 4 --pull-secret <path_to_pull_secret> \ 5 --region <region> \ 6 --generate-ssh \ --node-pool-replicas <node_pool_replica_count> \ 7 --namespace <hosted_cluster_namespace> \ 8 --role-arn <role_name> 9 1 Specify the name of your hosted cluster, for instance, example . 2 Specify your infrastructure name. You must provide the same value for <hosted_cluster_name> and <infra_id> . Otherwise the cluster might not appear correctly in the multicluster engine for Kubernetes operator console. 3 Specify your base domain, for example, example.com . 4 Specify the path to your AWS STS credentials file, for example, /home/user/sts-creds/sts-creds.json . 5 Specify the path to your pull secret, for example, /user/name/pullsecret . 6 Specify the AWS region name, for example, us-east-1 . 7 Specify the node pool replica count, for example, 3 . 8 By default, all HostedCluster and NodePool custom resources are created in the clusters namespace. You can use the --namespace <namespace> parameter, to create the HostedCluster and NodePool custom resources in a specific namespace. 9 Specify the Amazon Resource Name (ARN), for example, arn:aws:iam::820196288204:role/myrole . Verify the status of your hosted cluster to check that the value of AVAILABLE is True . Run the following command: oc get hostedclusters -n <hosted_cluster_namespace> Get a list of your node pools by running the following command: oc get nodepools --namespace <hosted_cluster_namespace> 1.8.6.9. Creating a hosted cluster in multiple zones on AWS Before you create a hosted cluster on Amazon Web Services (AWS), you must create an AWS Identity and Access Management (IAM) role and AWS Security Token Service (STS) credentials. See Creating an AWS IAM role and STS credentials . Create a hosted cluster in multiple zones on AWS by running the following command: hcp create cluster aws \ --name <hosted_cluster_name> \ 1 --node-pool-replicas=<node_pool_replica_count> \ 2 --base-domain <basedomain> \ 3 --pull-secret <path_to_pull_secret> \ 4 --role-arn <arn_role> \ 5 --region <region> \ 6 --zones <zones> 7 --sts-creds <path_to_sts_credential_file> 8 1 Specify the name of your hosted cluster, for instance, example . 2 Specify the node pool replica count, for example, 2 . 3 Specify your base domain, for example, example.com . 4 Specify the path to your pull secret, for example, /user/name/pullsecret . 5 Specify the Amazon Resource Name (ARN), for example, arn:aws:iam::820196288204:role/myrole . 6 Specify the AWS region name, for example, us-east-1 . 7 Specify availability zones within your AWS region, for example, us-east-1a , and us-east-1b . 8 Specify the path to your AWS STS credentials file, for example, /home/user/sts-creds/sts-creds.json . For each specified zone, the following infrastructure is created: Public subnet Private subnet NAT gateway Private route table (public route table is shared across public subnets) One NodePool resource is created for each zone. The node pool name is suffixed by the zone name. The private subnet for zone is set in spec.platform.aws.subnet.id . 1.8.6.9.1. Providing credentials for creating a hosted cluster on AWS When you create a hosted cluster by using the hcp create cluster aws command, you need to provide AWS account credentials that have permissions to create infrastructure resources for your cluster. Examples of infrastructure resources include VPCs, subnets, and NAT gateways. You can provide the AWS credentials in two ways: by using the --sts-creds flag or by using the AWS cloud provider secret from multicluster engine operator. 1.8.6.9.1.1. Providing credentials by using the --sts-creds flag You can use AWS STS credentials to create a hosted cluster by specifying the --sts-creds flag. Run the following command: hcp create cluster aws \ --name <hosted_cluster_name> \ 1 --node-pool-replicas <node_pool_replica_count> \ 2 --base-domain <basedomain> \ 3 --pull-secret <path_to_pull_secret> \ 4 --sts-creds <path_to_sts_credential_file> \ 5 --region <region> 6 --role-arn <arn_role> \ 7 1 Specify the name of your hosted cluster, for instance, example . 2 Specify the node pool replica count, for example, 2 . 3 Specify your base domain, for example, example.com . 4 Specify the path to your pull secret, for example, /user/name/pullsecret . 5 Specify the path to your AWS STS credentials file, for example, /home/user/sts-creds/sts-creds.json . 6 Specify the AWS region name, for example, us-east-1 . 7 Specify the Amazon Resource Name (ARN), for example, arn:aws:iam::820196288204:role/myrole . 1.8.6.9.2. Additional resources For instructions to install the AWS Elastic File Service (EFS) CSI Driver Operator on a hosted cluster, see Configuring AWS EFS CSI Driver Operator with Security Token Service . 1.8.6.10. Enabling hosted clusters to run on different architectures (Technology Preview) By default for hosted control planes on AWS, you use an AMD64 hosted cluster. However, you can enable hosted control planes to run on an ARM64 hosted cluster. You can also create node pools of different architecture types for the same hosted cluster. See the following table for compatible combinations of node pools and hosted clusters: Hosted cluster Node pool AMD64 AMD64 or ARM64 ARM64 ARM64 or AMD64 1.8.6.10.1. Prerequisites You must have an OpenShift Container Platform cluster with a 64-bit ARM infrastructure that is installed on Amazon Web Serivces (AWS). For more information, see Create an OpenShift Cluster: AWS (ARM) . You must create an AWS Identity and Access Management (IAM) role and AWS Security Token Service (STS) credentials. See Creating an AWS IAM role and STS credentials . 1.8.6.10.2. Running a hosted cluster on an ARM64 OpenShift Container Platform cluster You can run a hosted cluster on an ARM64 OpenShift Container Platform cluster by overriding the default release image with a multi-architecture release image. Complete the following steps: Create a hosted cluster by running the following command: hcp create cluster aws \ --name <hosted_cluster_name> \ 1 --node-pool-replicas <node_pool_replica_count> \ 2 --base-domain <basedomain> \ 3 --pull-secret <path_to_pull_secret> \ 4 --sts-creds <path_to_sts_credential_file> \ 5 --region <region> \ 6 --release-image quay.io/openshift-release-dev/ocp-release:<ocp_release_image> \ 7 --role-arn <role_name> \ 8 --multi-arch 9 1 Specify the name of your hosted cluster, for instance, example . 2 Specify the node pool replica count, for example, 3 . 3 Specify your base domain, for example, example.com . 4 Specify the path to your pull secret, for example, /user/name/pullsecret . 5 Specify the path to your AWS STS credentials file, for example, /home/user/sts-creds/sts-creds.json . 6 Specify the AWS region name, for example, us-east-1 . 7 Specify the supported OpenShift Container Platform version that you want to use, for example, 4.14.0-x86_64 . If you are using a disconnected environment, replace <ocp_release_image> with the digest image. To extract the OpenShift Container Platform release image digest, see Extracting the OpenShift Container Platform release image digest . 8 Specify the Amazon Resource Name (ARN), for example, arn:aws:iam::820196288204:role/myrole . 9 The multi-arch flag indicates that the hosted cluster supports both amd64 and arm64 node pools. When you set the flag and supply a release image or release stream, the release image or stream must be a multi-architecture release image or stream. If you do not set the multi-arch flag and you use a multi-architecture release image or stream, the CLI automatically sets the flag to true and generates a log message. A node pool supports only one CPU architecture within the same node pool. Add a NodePool object to the hosted cluster by running the following command: hcp create nodepool aws \ --cluster-name <hosted_cluster_name> \ 1 --name <nodepool_name> \ 2 --node-count <node_pool_replica_count> 3 1 Specify the name of your hosted cluster, for instance, example . 2 Specify the node pool name. 3 Specify the node pool replica count, for example, 3 . 1.8.6.10.3. Creating ARM NodePool objects on AWS hosted clusters You can schedule application workloads ( NodePool objects) on 64-bit ARM and AMD64 from the same hosted control plane. You can define the arch field in the NodePool specification to set the required processor architecture for the NodePool object. The valid values for the arch field are as follows: arm64 amd64 If you do not specify a value for the arch field, the amd64 value is used by default. 1.8.6.10.3.1. Prerequisites You must have a multi-architecture image for the HostedCluster custom resource to use. You can access multi-architecture nightly images at https://multi.ocp.releases.ci.openshift.org/ . 1.8.6.10.3.2. Creating an ARM or AMD NodePool object on an AWS hosted cluster To create an ARM or AMD NodePool object on a hosted cluster on AWS, complete the following steps: Add a NodePool object to the hosted cluster by running the following command: hcp create nodepool aws \ --cluster-name <hosted_cluster_name> \ 1 --name <node_pool_name> \ 2 --node-count <node_pool_replica_count> 3 --arch <architecture> 4 1 Specify the name of your hosted cluster, for instance, example . 2 Specify the node pool name. 3 Specify the node pool replica count, for example, 3 . 4 Specify the architecture type, such as arm64 or amd64 . 1.8.6.11. Accessing the hosted cluster You can access the hosted cluster by either getting the kubeconfig file and kubeadmin credential directly from resources, or by using the hcp command line interface to generate a kubeconfig file. To access the hosted cluster by getting the kubeconfig file and credentials directly from resources, you need to be familiar with the access secrets for hosted control plane clusters. The secrets are stored in the hosted cluster (hosting) namespace. The hosted cluster (hosting) namespace contains hosted cluster resources, and the hosted control plane namespace is where the hosted control plane runs. The secret name formats are as follows: kubeconfig secret: <hosted-cluster-namespace>-<name>-admin-kubeconfig (clusters-hypershift-demo-admin-kubeconfig) kubeadmin password secret: <hosted-cluster-namespace>-<name>-kubeadmin-password (clusters-hypershift-demo-kubeadmin-password) The kubeconfig secret contains a Base64-encoded kubeconfig field, which you can decode and save into a file to use with the following command: The kubeadmin password secret is also Base64-encoded. You can decode it and use the password to log in to the API server or console of the hosted cluster. To access the hosted cluster by using the hcp CLI to generate the kubeconfig file, take the following steps: Generate the kubeconfig file by entering the following command: After you save the kubeconfig file, you can access the hosted cluster by entering the following example command: 1.8.6.11.1. Additional resources After you access the hosted cluster, you can scale a node pool or enable node auto-scaling for the hosted cluster. For more information, read these topics: Scaling a node pool Enabling node auto-scaling for the hosted cluster To configure node tuning for a hosted cluster, see the following topics: Configuring node tuning in a hosted cluster Advanced node tuning for hosted clusters by setting kernel boot parameters . 1.8.6.12. Deploying a private hosted cluster on AWS After you set up the hosted control planes command line interface, hcp , and enable the local-cluster as the hosting cluster, you can deploy a hosted cluster or a private hosted cluster on Amazon Web Services (AWS). To deploy a public hosted cluster on AWS, see Deploying a hosted cluster on AWS . By default, hosted control plane guest clusters are publicly accessible through public DNS and the default router for the management cluster. For private clusters on AWS, all communication with the guest cluster occurs over AWS PrivateLink. To configure hosted control planes for private cluster support on AWS, take the following steps. Important: Although public clusters can be created in any region, you can create private clusters only in the region that is specified by the --aws-private-region flag. Prerequisites Creating a private hosted cluster on AWS Accessing a private hosting cluster on AWS 1.8.6.12.1. Prerequisites To enable private hosted clusters for AWS, you must first enable AWS PrivateLink. For more information, see Enabling AWS PrivateLink . You must create an AWS Identity and Access Management (IAM) role and AWS Security Token Service (STS) credentials. For more information, see Creating an AWS IAM role and STS credentials and Identity and Access Management (IAM) permissions . To access a private cluster, you need a bastion instance on AWS. For more information, see Linux Bastion Hosts on AWS in the AWS documentation. 1.8.6.12.2. Creating a private hosted cluster on AWS To create a private hosted cluster on AWS, complete the following steps: Create a private hosted cluster by entering the following command, replacing variables with your values as needed: hcp create cluster aws \ --name <hosted_cluster_name> \ 1 --node-pool-replicas=<node_pool_replica_count> \ 2 --base-domain <basedomain> \ 3 --pull-secret <path_to_pull_secret> \ 4 --sts-creds <path_to_sts_credential_file> \ 5 --region <region> \ 6 --endpoint-access Private 7 --role-arn <role_name> 8 1 Specify the name of your hosted cluster, for instance, example . 2 Specify the node pool replica count, for example, 3 . 3 Specify your base domain, for example, example.com . 4 Specify the path to your pull secret, for example, /user/name/pullsecret . 5 Specify the path to your AWS STS credentials file, for example, /home/user/sts-creds/sts-creds.json . 6 Specify the AWS region name, for example, us-east-1 . 7 Defines whether a cluster is public or private. 8 Specify the Amazon Resource Name (ARN), for example, arn:aws:iam::820196288204:role/myrole . For more information about ARN roles, see Identity and Access Management (IAM) permissions . The API endpoints for the cluster are accessible through a private DNS zone: api.<hosted_cluster_name>.hypershift.local *.apps.<hosted_cluster_name>.hypershift.local 1.8.6.12.2.1. Additional resources For more information about deploying a public hosted cluster on AWS, see Deploying a hosted cluster on AWS . For more information about the ARN roles that you need to create a hosted cluster, see Identity and Access Management (IAM) permissions . 1.8.6.12.3. Accessing a private hosting cluster on AWS Find the private IPs of nodes in the cluster node pool by entering the following command: aws ec2 describe-instances --filter="Name=tag:kubernetes.io/cluster/<infra_id>,Values=owned" | jq '.Reservations[] | .Instances[] | select(.PublicDnsName=="") | .PrivateIpAddress' Create a kubeconfig file for the hosted cluster that you can copy to a node by entering the following command: hcp create kubeconfig > <hosted_cluster_kubeconfig> Enter the following command to SSH into one of the nodes through the bastion: ssh -o ProxyCommand="ssh ec2-user@<bastion_ip> -W %h:%p" core@<node_ip> From the SSH shell, copy the kubeconfig file contents to a file on the node by entering the following command: mv <path_to_kubeconfig_file> <new_file_name> Export the kubeconfig file by entering the following command: export KUBECONFIG=<path_to_kubeconfig_file> Observe the guest cluster status by entering the following command: oc get clusteroperators clusterversion 1.8.6.13. Managing AWS infrastructure and IAM permissions for hosted control planes When you use hosted control planes for Red Hat OpenShift Container Platform on Amazon Web Services (AWS), the infrastructure requirements vary based on your setup. Prerequisites AWS infrastructure requirements Identity and Access Management permissions Creating AWS infrastructure and IAM resources separately 1.8.6.13.1. Prerequisites You must configure hosted control planes before you can create hosted control plane clusters. See Configuring hosted control plane clusters on AWS for more information. You must create an AWS Identity and Access Management (IAM) role and AWS Security Token Service (STS) credentials. See Creating an AWS IAM role and STS credentials . 1.8.6.13.2. AWS infrastructure requirements When you use hosted control planes on AWS, the infrastructure requirements fit in the following categories: Prerequired and unmanaged infrastructure for the HyperShift Operator in an arbitrary AWS account Prerequired and unmanaged infrastructure in a hosted cluster AWS account Hosted control planes-managed infrastructure in a management AWS account Hosted control planes-managed infrastructure in a hosted cluster AWS account Kubernetes-managed infrastructure in a hosted cluster AWS account Prerequired means that hosted control planes requires AWS infrastructure to properly work. Unmanaged means that no Operator or controller creates the infrastructure for you. The following sections contain details about the creation of the AWS resources. 1.8.6.13.2.1. Prerequired and unmanaged infrastructure for the HyperShift Operator in an arbitrary AWS account An arbitrary AWS account depends on the provider of the hosted control planes service. In self-managed hosted control planes, the cluster service provider controls the AWS account. The cluster service provider is the administrator who hosts cluster control planes and is responsible for uptime. In managed hosted control planes, the AWS account belongs to Red Hat. In a prerequired and unmanaged infrastructure for the HyperShift Operator, the following infrastructure requirements apply for a management cluster AWS account: One S3 Bucket OpenID Connect (OIDC) Route 53 hosted zones A domain to host private and public entries for hosted clusters 1.8.6.13.2.2. Prerequired and unmanaged infrastructure in a hosted cluster AWS account When your infrastructure is prerequired and unmanaged in a hosted cluster AWS account, the infrastructure requirements for all access modes are as follows: One VPC One DHCP Option Two subnets A private subnet that is an internal data plane subnet A public subnet that enables access to the internet from the data plane One internet gateway One elastic IP One NAT gateway One security group (worker nodes) Two route tables (one private and one public) Two Route 53 hosted zones Enough quota for the following items: One Ingress service load balancer for public hosted clusters One private link endpoint for private hosted clusters Note: For private link networking to work, the endpoint zone in the hosted cluster AWS account must match the zone of the instance that is resolved by the service endpoint in the management cluster AWS account. In AWS, the zone names are aliases, such as us-east-2b , which do not necessarily map to the same zone in different accounts. As a result, for private link to work, the management cluster must have subnets or workers in all zones of its region. 1.8.6.13.2.3. Hosted control planes-managed infrastructure in a management AWS account When your infrastructure is managed by hosted control planes in a management AWS account, the infrastructure requirements differ depending on whether your clusters are public, private, or a combination. For accounts with public clusters, the infrastructure requirements are as follows: Network load balancer: a load balancer Kube API server Kubernetes creates a security group Volumes For etcd (one or three depending on high availability) For OVN-Kube For accounts with private clusters, the infrastructure requirements are as follows: Network load balancer: a load balancer private router Endpoint service (private link) For accounts with public and private clusters, the infrastructure requirements are as follows: Network load balancer: a load balancer public router Network load balancer: a load balancer private router Endpoint service (private link) Volumes: For etcd (one or three depending on high availability) For OVN-Kube 1.8.6.13.2.4. Hosted control planes-managed infrastructure in a hosted cluster AWS account When your infrastructure is managed by hosted control planes in a hosted cluster AWS account, the infrastructure requirements differ depending on whether your clusters are public, private, or a combination. For accounts with public clusters, the infrastructure requirements are as follows: Node pools must have EC2 instances that have Role and RolePolicy defined. For accounts with private clusters, the infrastructure requirements are as follows: One private link endpoint for each availability zone EC2 instances for node pools For accounts with public and private clusters, the infrastructure requirements are as follows: One private link endpoint for each availability zone EC2 instances for node pools 1.8.6.13.2.5. Kubernetes-managed infrastructure in a hosted cluster AWS account When Kubernetes manages your infrastructure in a hosted cluster AWS account, the infrastructure requirements are as follows: A network load balancer for default Ingress An S3 bucket for registry 1.8.6.13.3. Identity and Access Management (IAM) permissions In the context of hosted control planes, the consumer is responsible to create the Amazon Resource Name (ARN) roles. The consumer is an automated process to generate the permissions files. The consumer might be the command line interface or OpenShift Cluster Manager. Hosted control planes tries to enable granularity to honor the principle of least-privilege components, which means that every component uses its own role to operate or create AWS objects, and the roles are limited to what is required for the product to function normally. For an example of how the command line interface can create the ARN roles, see "Creating AWS infrastructure and IAM resources separately". The hosted cluster receives the ARN roles as input and the consumer creates an AWS permission configuration for each component. As a result, the component can authenticate through STS and preconfigured OIDC IDP. The following roles are consumed by some of the components from hosted control planes that run on the control plane and operate on the data plane: controlPlaneOperatorARN imageRegistryARN ingressARN kubeCloudControllerARN nodePoolManagementARN storageARN networkARN The following example shows a reference to the IAM roles from the hosted cluster: The roles that hosted control planes uses are shown in the following examples: ingressARN imageRegistryARN storageARN networkARN kubeCloudControllerARN nodePoolManagementARN controlPlaneOperatorARN 1.8.6.13.4. Creating AWS infrastructure and IAM resources separately By default, the hcp create cluster aws command creates cloud infrastructure with the hosted cluster and applies it. You can create the cloud infrastructure portion separately so that the hcp create cluster aws command can be used only to create the cluster, or render it so that you can modify it before you apply it. To create the cloud infrastructure portion separately, you need to create the AWS infrastructure, create the AWS Identity and Access (IAM) resources, and create the cluster. 1.8.6.13.4.1. Creating the AWS infrastructure To create the AWS infrastructure, you need to create a VPC and other resources for your cluster. You can use the AWS console or an infrastructure automation and provisioning tool. For instructions to use the AWS console, see Create a VPC plus other VPC resources in the AWS Documentation. The VPC must include private and public subnets and resources for external access, such as an NAT gateway and an internet gateway. In addition to the VPC, you need a private hosted zone for the ingress of your cluster. If you are creating clusters that use PrivateLink ( Private or PublicAndPrivate access modes), you need an additional hosted zone for PrivateLink. The following YAML file shows the fields that are required to create the AWS infrastructure for your cluster. Example YAML file --- apiVersion: v1 kind: Namespace metadata: creationTimestamp: null name: clusters spec: {} status: {} --- apiVersion: v1 data: .dockerconfigjson: xxxxxxxxxxx kind: Secret metadata: creationTimestamp: null labels: hypershift.openshift.io/safe-to-delete-with-cluster: "true" name: <pull_secret_name> 1 namespace: clusters --- apiVersion: v1 data: key: xxxxxxxxxxxxxxxxx kind: Secret metadata: creationTimestamp: null labels: hypershift.openshift.io/safe-to-delete-with-cluster: "true" name: <etcd_encryption_key_name> 2 namespace: clusters type: Opaque --- apiVersion: v1 data: id_rsa: xxxxxxxxx id_rsa.pub: xxxxxxxxx kind: Secret metadata: creationTimestamp: null labels: hypershift.openshift.io/safe-to-delete-with-cluster: "true" name: <ssh-key-name> 3 namespace: clusters --- apiVersion: hypershift.openshift.io/v1beta1 kind: HostedCluster metadata: creationTimestamp: null name: <hosted_cluster_name> 4 namespace: clusters spec: autoscaling: {} configuration: {} controllerAvailabilityPolicy: SingleReplica dns: baseDomain: <dns_domain> 5 privateZoneID: xxxxxxxx publicZoneID: xxxxxxxx etcd: managed: storage: persistentVolume: size: 8Gi storageClassName: gp3-csi type: PersistentVolume managementType: Managed fips: false infraID: <infra_id> 6 issuerURL: <issuer_url> 7 networking: clusterNetwork: - cidr: 10.132.0.0/14 machineNetwork: - cidr: 10.0.0.0/16 networkType: OVNKubernetes serviceNetwork: - cidr: 172.31.0.0/16 olmCatalogPlacement: management platform: aws: cloudProviderConfig: subnet: id: <subnet_xxx> 8 vpc: <vpc_xxx> 9 zone: us-west-1b endpointAccess: Public multiArch: false region: us-west-1 rolesRef: controlPlaneOperatorARN: arn:aws:iam::820196288204:role/<infra_id>-control-plane-operator imageRegistryARN: arn:aws:iam::820196288204:role/<infra_id>-openshift-image-registry ingressARN: arn:aws:iam::820196288204:role/<infra_id>-openshift-ingress kubeCloudControllerARN: arn:aws:iam::820196288204:role/<infra_id>-cloud-controller networkARN: arn:aws:iam::820196288204:role/<infra_id>-cloud-network-config-controller nodePoolManagementARN: arn:aws:iam::820196288204:role/<infra_id>-node-pool storageARN: arn:aws:iam::820196288204:role/<infra_id>-aws-ebs-csi-driver-controller type: AWS pullSecret: name: <pull_secret_name> release: image: quay.io/openshift-release-dev/ocp-release:4.16-x86_64 secretEncryption: aescbc: activeKey: name: <etcd_encryption_key_name> type: aescbc services: - service: APIServer servicePublishingStrategy: type: LoadBalancer - service: OAuthServer servicePublishingStrategy: type: Route - service: Konnectivity servicePublishingStrategy: type: Route - service: Ignition servicePublishingStrategy: type: Route - service: OVNSbDb servicePublishingStrategy: type: Route sshKey: name: <ssh_key_name> status: controlPlaneEndpoint: host: "" port: 0 --- apiVersion: hypershift.openshift.io/v1beta1 kind: NodePool metadata: creationTimestamp: null name: <node_pool_name> 10 namespace: clusters spec: arch: amd64 clusterName: <hosted_cluster_name> management: autoRepair: true upgradeType: Replace nodeDrainTimeout: 0s platform: aws: instanceProfile: <instance_profile_name> 11 instanceType: m6i.xlarge rootVolume: size: 120 type: gp3 subnet: id: <subnet_xxx> type: AWS release: image: quay.io/openshift-release-dev/ocp-release:4.16-x86_64 replicas: 2 status: replicas: 0 1 Replace <pull_secret_name> with the name of your pull secret. 2 Replace <etcd_encryption_key_name> with the name of your etcd encryption key. 3 Replace <ssh_key_name> with the name of your SSH key. 4 Replace <hosted_cluster_name> with the name of your hosted cluster. 5 Replace <dns_domain> with your base DNS domain, such as example.com . 6 Replace <infra_id> with the value that identifies the IAM resources that are associated with the hosted cluster. 7 Replace <issuer_url> with your issuer URL, which ends with your infra_id value. For example, https://example-hosted-us-west-1.s3.us-west-1.amazonaws.com/example-hosted-infra-id . 8 Replace <subnet_xxx> with your subnet ID. Both private and public subnets need to be tagged. For public subnets, use kubernetes.io/role/elb=1 . For private subnets, use kubernetes.io/role/internal-elb=1 . 9 Replace <vpc_xxx> with your VPC ID. 10 Replace <node_pool_name> with the name of your NodePool resource. 11 Replace <instance_profile_name> with the name of your AWS instance. 1.8.6.13.4.2. Creating the AWS IAM resources In AWS, you need to create the following IAM resources: One OIDC provider, which is required to enable STS authentication Seven roles, which are separate for every component that interacts with the provider, such as the Kubernetes controller manager, cluster API provider, and registry One instance profile, which is the profile that is assigned to all worker instances of the cluster To create an OIDC provider, follow the steps in Create an OpenID Connect (OIDC) identity provider in IAM in the AWS documentation. To create the roles for each component that interacts with the provider, follow the procedures in Creating IAM roles in the AWS documentation. For more information about the roles, see "Identity and Access Management (IAM) permissions." To create an instance profile, see Using instance profiles in the AWS documentation. 1.8.6.13.4.3. Creating the cluster To create the cluster, enter the following command: hcp create cluster aws \ --infra-id <infra_id> \ 1 --name <hosted_cluster_name> \ 2 --sts-creds <path_to_sts_credential_file> \ 3 --pull-secret <path_to_pull_secret> \ 4 --generate-ssh \ 5 --node-pool-replicas 3 --role-arn <role_name> 6 1 Replace <infra_id> with the same ID that you specified in the create infra aws command. This value identifies the IAM resources that are associated with the hosted cluster. 2 Replace <hosted_cluster_name> with the name of your hosted cluster. 3 Replace <path_to_sts_credential_file> with the same name that you specified in the create infra aws command. 4 Replace <path_to_pull_secret> with the name of the file that contains a valid OpenShift Container Platform pull secret. 5 The --generate-ssh flag is optional, but is good to include in case you need to SSH to your workers. An SSH key is generated for you and is stored as a secret in the same namespace as the hosted cluster. 6 Replace <role_name> with the Amazon Resource Name (ARN), for example, arn:aws:iam::820196288204:role/myrole . Specify the Amazon Resource Name (ARN), for example, arn:aws:iam::820196288204:role/myrole . For more information about ARN roles, see "Identity and Access Management (IAM) permissions". You can also add the --render flag to the command and redirect output to a file where you can edit the resources before you apply them to the cluster. After you run the command, the following resources are applied to your cluster: A namespace A secret with your pull secret A HostedCluster A NodePool Three AWS STS secrets for control plane components One SSH key secret if you specified the --generate-ssh flag. 1.8.6.14. Destroying a hosted cluster on AWS To destroy a hosted cluster and its managed cluster resource on Amazon Web Services (AWS), complete the following steps: Delete the managed cluster resource on multicluster engine operator. Replace <hosted_cluster_name> with the name of your cluster. Run the following command: oc delete managedcluster <hosted_cluster_name> Delete the hosted cluster and its back-end resources by running the following command: hcp destroy cluster aws \ --name <hosted_cluster_name> \ 1 --infra-id <infra_id> \ 2 --role-arn <arn_role> \ 3 --sts-creds <path_to_sts_credential_file> \ 4 --base-domain <basedomain> 5 1 Specify the name of your hosted cluster, for instance, example . 2 Specify the infrastructure name for your hosted cluster. 3 Specify the Amazon Resource Name (ARN), for example, arn:aws:iam::820196288204:role/myrole . 4 Specify the path to your AWS Security Token Service (STS) credentials file, for example, /home/user/sts-creds/sts-creds.json . 5 Specify your base domain, for example, example.com . Note: If your session token for AWS Security Token Service (STS) is expired, retrieve STS credentials in a JSON file named sts-creds.json by running the following command: aws sts get-session-token --output json > sts-creds.json 1.8.7. Configuring hosted control plane clusters on bare metal You can deploy hosted control planes by configuring a cluster to function as a hosting cluster. The hosting cluster is the OpenShift Container Platform cluster where the control planes are hosted. The hosting cluster is also known as the management cluster. Note: The management cluster is not the same thing as the managed cluster. A managed cluster is a cluster that the hub cluster manages. The hosted control planes feature is enabled by default. The multicluster engine operator 2.5 supports only the default local-cluster , which is a hub cluster that is managed, and the hub cluster as the hosting cluster. On Red Hat Advanced Cluster Management 2.10, you can use the managed hub cluster, also known as the local-cluster , as the hosting cluster. A hosted cluster is an OpenShift Container Platform cluster with its API endpoint and control plane that are hosted on the hosting cluster. The hosted cluster includes the control plane and its corresponding data plane. You can use the multicluster engine operator console or the hosted control plane command line interface, hcp , to create a hosted cluster. The hosted cluster is automatically imported as a managed cluster. If you want to disable this automatic import feature, see Disabling the automatic import of hosted clusters into multicluster engine operator . Important: Run the hub cluster and workers on the same platform for hosted control planes. Each hosted cluster must have a cluster-wide unique name. A hosted cluster name cannot be the same as any existing managed cluster in order for multicluster engine operator to manage it. Do not use clusters as a hosted cluster name. A hosted cluster cannot be created in the namespace of a multicluster engine operator managed cluster. To provision hosted control planes on bare metal, you can use the Agent platform. The Agent platform uses the central infrastructure management service to add worker nodes to a hosted cluster. For an introduction to the central infrastructure management service, see Enabling the central infrastructure management service . All bare metal hosts require a manual boot with a Discovery Image ISO that the central infrastructure management provides. You can start the hosts manually or through automation by using Cluster-Baremetal-Operator . After each host starts, it runs an Agent process to discover the host details and complete the installation. An Agent custom resource represents each host. When you create a hosted cluster with the Agent platform, HyperShift installs the Agent Cluster API provider in the hosted control plane namespace. When you scale a replica by the node pool, a machine is created. For every machine, the Cluster API provider finds and installs an Agent that meets the requirements that are specified in the node pool specification. You can monitor the installation of an Agent by checking its status and conditions. When you scale down a node pool, Agents are unbound from the corresponding cluster. Before you can reuse the Agents, you must restart them by using the Discovery image. When you configure storage for hosted control planes, consider the recommended etcd practices. To ensure that you meet the latency requirements, dedicate a fast storage device to all hosted control plane etcd instances that run on each control-plane node. You can use LVM storage to configure a local storage class for hosted etcd pods. For more information, see Recommended etcd practices and Persistent storage using logical volume manager storage in the OpenShift Container Platform documentation. 1.8.7.1. Prerequisites You must have the following prerequisites to configure a hosting cluster: You need the multicluster engine for Kubernetes operator 2.2 and later installed on an OpenShift Container Platform cluster. The multicluster engine operator is automatically installed when you install Red Hat Advanced Cluster Management. You can also install multicluster engine operator without Red Hat Advanced Cluster Management as an Operator from the OpenShift Container Platform OperatorHub. The multicluster engine operator must have at least one managed OpenShift Container Platform cluster. The local-cluster is automatically imported in multicluster engine operator 2.2 and later. See Advanced configuration for more information about the local-cluster . You can check the status of your hub cluster by running the following command: You must add the topology.kubernetes.io/zone label to your bare metal hosts on your management cluster. Otherwise, all of the hosted control plane pods are scheduled on a single node, causing single point of failure. You need to enable central infrastructure management. For more information, see Enabling the central infrastructure management service . You need to install the hosted control plane command line interface . For a hub cluster that has a proxy configuration to reach the hosted cluster API endpoint, add all hosted cluster API endpoints to the noProxy field on the Proxy object. For more information, see Configuring the cluster-wide proxy . 1.8.7.2. Bare metal firewall, port, and service requirements You must meet the firewall, port, and service requirements so that ports can communicate between the management cluster, the control plane, and hosted clusters. Note: Services run on their default ports. However, if you use the NodePort publishing strategy, services run on the port that is assigned by the NodePort service. Use firewall rules, security groups, or other access controls to restrict access to only required sources. Avoid exposing ports publicly unless necessary. For production deployments, use a load balancer to simplify access through a single IP address. A hosted control plane exposes the following services on bare metal: APIServer The APIServer service runs on port 6443 by default and requires ingress access for communication between the control plane components. If you use MetalLB load balancing, allow ingress access to the IP range that is used for load balancer IP addresses. OAuthServer The OAuthServer service runs on port 443 by default when you use the route and ingress to expose the service. If you use the NodePort publishing strategy, use a firewall rule for the OAuthServer service. Konnectivity The Konnectivity service runs on port 443 by default when you use the route and ingress to expose the service. The Konnectivity agent establishes a reverse tunnel to allow the control plane to access the network for the hosted cluster. The agent uses egress to connect to the Konnectivity server. The server is exposed by using either a route on port 443 or a manually assigned NodePort . If the cluster API server address is an internal IP address, allow access from the workload subnets to the IP address on port 6443. If the address is an external IP address, allow egress on port 6443 to that external IP address from the nodes. Ignition The Ignition service runs on port 443 by default when you use the route and ingress to expose the service. If you use the NodePort publishing strategy, use a firewall rule for the Ignition service. You do not need the following services on bare metal: OVNSbDb OIDC 1.8.7.3. Bare metal infrastructure requirements The Agent platform does not create any infrastructure, but it does have the following requirements for infrastructure: Agents: An Agent represents a host that is booted with a discovery image and is ready to be provisioned as an OpenShift Container Platform node. DNS: The API and ingress endpoints must be routable. For additional resources about hosted control planes on bare metal, see the following documentation: To learn about etcd and LVM storage recommendations, see Recommended etcd practices and Persistent storage using logical volume manager storage . To configure hosted control planes on bare metal in disconnected environment, see Configuring hosted control planes in a disconnected environment . To disable the hosted control planes feature or, if you already disabled it and want to manually enable it, see Enabling or disabling the hosted control planes feature . To manage hosted clusters by running Red Hat Ansible Automation Platform jobs, see Configuring Ansible Automation Platform jobs to run on hosted clusters . To deploy the SR-IOV Operator, see Deploying the SR-IOV Operator for hosted control planes . If you want to disable the automatic import feature, see Disabling the automatic import of hosted clusters into multicluster engine operator . 1.8.7.4. Configuring DNS on bare metal The API Server for the hosted cluster is exposed as a NodePort service. A DNS entry must exist for api.USD{HOSTED_CLUSTER_NAME}.USD{BASEDOMAIN} that points to destination where the API Server can be reached. The DNS entry can be as simple as a record that points to one of the nodes in the managed cluster that is running the hosted control plane. The entry can also point to a load balancer that is deployed to redirect incoming traffic to the ingress pods. See the following example DNS configuration: If you are configuring DNS for a disconnected environment on an IPv6 network, see the following example DNS configuration: If you are configuring DNS for a disconnected environment on a dual stack network, be sure to include DNS entries for both IPv4 and IPv6. See the following example DNS configuration: , create a host inventory for hosted control planes on bare metal. 1.8.7.5. Creating a hosted cluster on bare metal You can create a hosted cluster on bare metal or import one. For instructions to import a hosted cluster, see Importing a hosted cluster . Create the hosted control plane namespace by entering the following command: oc create ns <hosted_cluster_namespace>-<hosted_cluster_name> Replace <hosted_cluster_namespace> with your hosted cluster namespace name, for example, clusters . Replace <hosted_cluster_name> with your hosted cluster name. Verify that you have a default storage class configured for your cluster. Otherwise, you might see pending PVCs. Run the following command: hcp create cluster agent \ --name=<hosted_cluster_name> \ 1 --pull-secret=<path_to_pull_secret> \ 2 --agent-namespace=<hosted_control_plane_namespace> \ 3 --base-domain=<basedomain> \ 4 --api-server-address=api.<hosted_cluster_name>.<basedomain> \ 5 --etcd-storage-class=<etcd_storage_class> \ 6 --ssh-key <path_to_ssh_public_key> \ 7 --namespace <hosted_cluster_namespace> \ 8 --control-plane-availability-policy SingleReplica \ --release-image=quay.io/openshift-release-dev/ocp-release:<ocp_release_image> 9 1 Specify the name of your hosted cluster, for instance, example . 2 Specify the path to your pull secret, for example, /user/name/pullsecret . 3 Specify your hosted control plane namespace, for example, clusters-example . Ensure that agents are available in this namespace by using the oc get agent -n <hosted_control_plane_namespace> command. 4 Specify your base domain, for example, krnl.es . 5 The --api-server-address flag defines the IP address that is used for the Kubernetes API communication in the hosted cluster. If you do not set the --api-server-address flag, you must log in to connect to the management cluster. 6 Specify the etcd storage class name, for example, lvm-storageclass . 7 Specify the path to your SSH public key. The default file path is ~/.ssh/id_rsa.pub . 8 Specify your hosted cluster namespace. 9 Specify the supported OpenShift Container Platform version that you want to use, for example, 4.14.0-x86_64 . If you are using a disconnected environment, replace <ocp_release_image> with the digest image. To extract the OpenShift Container Platform release image digest, see Extracting the OpenShift Container Platform release image digest . After a few moments, verify that your hosted control plane pods are up and running by entering the following command: oc -n <hosted_control_plane_namespace> get pods See the following example output: 1.8.7.5.1. Creating a hosted cluster on bare metal by using the console Open the OpenShift Container Platform web console and log in by entering your administrator credentials. For instructions to open the console, see Accessing the web console in the OpenShift Container Platform documentation. In the console header, ensure that All Clusters is selected. Click Infrastructure > Clusters . Click Create cluster > Host inventory > Hosted control plane . The Create cluster page is displayed. On the Create cluster page, follow the prompts to enter details about the cluster, node pools, networking, and automation. Note: As you enter details about the cluster, you might find the following tips useful: If you want to use predefined values to automatically populate fields in the console, you can create a host inventory credential. For more information, see Creating a credential for an on-premises environment . On the Cluster details page, the pull secret is your OpenShift Container Platform pull secret that you use to access OpenShift Container Platform resources. If you selected a host inventory credential, the pull secret is automatically populated. On the Node pools page, the namespace contains the hosts for the node pool. If you created a host inventory by using the console, the console creates a dedicated namespace. On the Networking page, you select an API server publishing strategy. The API server for the hosted cluster can be exposed either by using an existing load balancer or as a service of the NodePort type. A DNS entry must exist for the api.USD{HOSTED_CLUSTER_NAME}.USD{BASEDOMAIN} setting that points to the destination where the API server can be reached. This entry can be a record that points to one of the nodes in the management cluster or a record that points to a load balancer that redirects incoming traffic to the Ingress pods. Review your entries and click Create . The Hosted cluster view is displayed. Monitor the deployment of the hosted cluster in the Hosted cluster view. If you do not see information about the hosted cluster, ensure that All Clusters is selected, then click the cluster name. Wait until the control plane components are ready. This process can take a few minutes. To view the node pool status, scroll to the NodePool section. The process to install the nodes takes about 10 minutes. You can also click Nodes to confirm whether the nodes joined the hosted cluster. 1.8.7.5.2. Creating a hosted cluster on bare metal by using a mirror registry You can use a mirror registry to create a hosted cluster on bare metal by specifying the --image-content-sources flag in the hcp create cluster command. Complete the following steps: Create a YAML file to define Image Content Source Policies (ICSP). See the following example: - mirrors: - brew.registry.redhat.io source: registry.redhat.io - mirrors: - brew.registry.redhat.io source: registry.stage.redhat.io - mirrors: - brew.registry.redhat.io source: registry-proxy.engineering.redhat.com Save the file as icsp.yaml . This file contains your mirror registries. To create a hosted cluster by using your mirror registries, run the following command: hcp create cluster agent \ --name=<hosted_cluster_name> \ 1 --pull-secret=<path_to_pull_secret> \ 2 --agent-namespace=<hosted_control_plane_namespace> \ 3 --base-domain=<basedomain> \ 4 --api-server-address=api.<hosted_cluster_name>.<basedomain> \ 5 --image-content-sources icsp.yaml \ 6 --ssh-key <path_to_ssh_key> \ 7 --namespace <hosted_cluster_namespace> \ 8 --release-image=quay.io/openshift-release-dev/ocp-release:<ocp_release_image> 9 1 Specify the name of your hosted cluster, for instance, example . 2 Specify the path to your pull secret, for example, /user/name/pullsecret . 3 Specify your hosted control plane namespace, for example, clusters-example . Ensure that agents are available in this namespace by using the oc get agent -n <hosted-control-plane-namespace> command. 4 Specify your base domain, for example, krnl.es . 5 The --api-server-address flag defines the IP address that is used for the Kubernetes API communication in the hosted cluster. If you do not set the --api-server-address flag, you must log in to connect to the management cluster. 6 Specify the icsp.yaml file that defines ICSP and your mirror registries. 7 Specify the path to your SSH public key. The default file path is ~/.ssh/id_rsa.pub . 8 Specify your hosted cluster namespace. 9 Specify the supported OpenShift Container Platform version that you want to use, for example, 4.14.0-x86_64 . If you are using a disconnected environment, replace <ocp_release_image> with the digest image. To extract the OpenShift Container Platform release image digest, see Extracting the OpenShift Container Platform release image digest . 1.8.7.5.3. Additional resources To create credentials that you can reuse when you create a hosted cluster with the console, see Creating a credential for an on-premises environment . To import a hosted cluster, see Manually importing a hosted control plane cluster . To access a hosted cluster, see Accessing the hosted cluster . To add hosts to the host inventory by using the Discovery Image, see Adding hosts to the host inventory by using the Discovery Image . To extract the OpenShift Container Platform release image digest, see Extracting the OpenShift Container Platform release image digest . 1.8.7.6. Verifying hosted cluster creation After the deployment process is complete, you can verify that the hosted cluster was created successfully. Follow these steps a few minutes after you create the hosted cluster. Obtain the kubeconfig for your new hosted cluster by entering the extract command: Use the kubeconfig to view the cluster Operators of the hosted cluster. Enter the following command: See the following example output: You can also view the running pods on your hosted cluster by entering the following command: See the following example output: 1.8.7.7. Scaling the NodePool object for a hosted cluster You can scale up the NodePool object, by adding nodes to your hosted cluster. Scale the NodePool object to two nodes: The Cluster API agent provider randomly picks two agents that are then assigned to the hosted cluster. Those agents go through different states and finally join the hosted cluster as OpenShift Container Platform nodes. The agents pass through states in the following order: binding discovering insufficient installing installing-in-progress added-to-existing-cluster Enter the following command: See the following example output: Enter the following command: See the following example output: Obtain the kubeconfig for your new hosted cluster by entering the extract command: After the agents reach the added-to-existing-cluster state, verify that you can see the OpenShift Container Platform nodes in the hosted cluster by entering the following command: See the following example output: Cluster Operators start to reconcile by adding workloads to the nodes. Enter the following command to verify that two machines were created when you scaled up the NodePool object: See the following example output: The clusterversion reconcile process eventually reaches a point where only Ingress and Console cluster operators are missing. Enter the following command: See the following example output: 1.8.7.7.1. Adding node pools You can create node pools for a hosted cluster by specifying a name, number of replicas, and any additional information, such as an agent label selector. To create a node pool, enter the following information: 1 The --agentLabelSelector is optional. The node pool uses agents with the "size" : "medium" label. Check the status of the node pool by listing nodepool resources in the clusters namespace: Extract the admin-kubeconfig secret by entering the following command: See the following example output: After some time, you can check the status of the node pool by entering the following command: Verify that the number of available node pools match the number of expected node pools by entering this command: 1.8.7.7.2. Additional resources To scale down the data plane to zero, see Scaling down the data plane to zero . 1.8.7.8. Handling ingress in a hosted cluster on bare metal Every OpenShift Container Platform cluster has a default application Ingress Controller that typically has an external DNS record associated with it. For example, if you create a hosted cluster named example with the base domain krnl.es , you can expect the wildcard domain *.apps.example.krnl.es to be routable. To set up a load balancer and wildcard DNS record for the *.apps domain, perform the following actions on your guest cluster: Deploy MetalLB by creating a YAML file that contains the configuration for the MetalLB Operator: apiVersion: v1 kind: Namespace metadata: name: metallb labels: openshift.io/cluster-monitoring: "true" annotations: workload.openshift.io/allowed: management --- apiVersion: operators.coreos.com/v1 kind: OperatorGroup metadata: name: metallb-operator-operatorgroup namespace: metallb --- apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: name: metallb-operator namespace: metallb spec: channel: "stable" name: metallb-operator source: redhat-operators sourceNamespace: openshift-marketplace Save the file as metallb-operator-config.yaml . Enter the following command to apply the configuration: oc apply -f metallb-operator-config.yaml After the Operator is running, create the MetalLB instance: Create a YAML file that contains the configuration for the MetalLB instance: apiVersion: metallb.io/v1beta1 kind: MetalLB metadata: name: metallb namespace: metallb Save the file as metallb-instance-config.yaml . Create the MetalLB instance by entering this command: oc apply -f metallb-instance-config.yaml Configure the MetalLB Operator by creating two resources: An IPAddressPool resource with a single IP address. This IP address must be on the same subnet as the network that the cluster nodes use. A BGPAdvertisement resource to advertise the load balancer IP addresses that the IPAddressPool resource provides through the BGP protocol. Create a YAML file to contain the configuration: apiVersion: metallb.io/v1beta1 kind: IPAddressPool metadata: name: <ip_address_pool_name> 1 namespace: metallb spec: protocol: layer2 autoAssign: false addresses: - <ingress_ip>-<ingress_ip> 2 --- apiVersion: metallb.io/v1beta1 kind: BGPAdvertisement metadata: name: <bgp_advertisement_name> 3 namespace: metallb spec: ipAddressPools: - <ip_address_pool_name> 4 1 4 Specify the IPAddressPool resource name. 2 Specify the IP address for your environment, for example, 192.168.122.23 . 3 Specify the BGPAdvertisement resource name. Save the file as ipaddresspool-bgpadvertisement-config.yaml . Create the resources by entering the following command: oc apply -f ipaddresspool-bgpadvertisement-config.yaml After creating a service of the LoadBalancer type, MetalLB adds an external IP address for the service. Configure a new load balancer service that routes ingress traffic to the ingress deployment by creating a YAML file named metallb-loadbalancer-service.yaml : kind: Service apiVersion: v1 metadata: annotations: metallb.universe.tf/address-pool: ingress-public-ip name: metallb-ingress namespace: openshift-ingress spec: ports: - name: http protocol: TCP port: 80 targetPort: 80 - name: https protocol: TCP port: 443 targetPort: 443 selector: ingresscontroller.operator.openshift.io/deployment-ingresscontroller: default type: LoadBalancer Save the metallb-loadbalancer-service.yaml file. Enter the following command to apply the YAML configuration: oc apply -f metallb-loadbalancer-service.yaml Enter the following command to reach the OpenShift Container Platform console: curl -kI https://console-openshift-console.apps.example.krnl.es HTTP/1.1 200 OK Check the clusterversion and clusteroperator values to verify that everything is running. Enter the following command: oc --kubeconfig <hosted_cluster_name>.kubeconfig get clusterversion,co See the following example output: + Replace 4.x.y with the supported OpenShift Container Platform version that you want to use, for example, 4.14.0-x86_64 . 1.8.7.8.1. Additional resources For more information about MetalLB, see About MetalLB and the MetalLB Operator in the OpenShift Container Platform documentation. 1.8.7.9. Enabling node auto-scaling for the hosted cluster When you need more capacity in your hosted cluster and spare agents are available, you can enable auto-scaling to install new worker nodes. To enable auto-scaling, enter the following command: Note: In the example, the minimum number of nodes is 2, and the maximum is 5. The maximum number of nodes that you can add might be bound by your platform. For example, if you use the Agent platform, the maximum number of nodes is bound by the number of available agents. Create a workload that requires a new node. Create a YAML file that contains the workload configuration, by using the following example: apiVersion: apps/v1 kind: Deployment metadata: creationTimestamp: null labels: app: reversewords name: reversewords namespace: default spec: replicas: 40 selector: matchLabels: app: reversewords strategy: {} template: metadata: creationTimestamp: null labels: app: reversewords spec: containers: - image: quay.io/mavazque/reversewords:latest name: reversewords resources: requests: memory: 2Gi status: {} Save the file as workload-config.yaml . Apply the YAML by entering the following command: Extract the admin-kubeconfig secret by entering the following command: See the following example output: You can check if new nodes are in the Ready status by entering the following command: To remove the node, delete the workload by entering the following command: Wait for several minutes to pass without requiring the additional capacity. On the Agent platform, the agent is decommissioned and can be reused. You can confirm that the node was removed by entering the following command: 1.8.7.9.1. Disabling node auto-scaling for the hosted cluster To disable node auto-scaling, enter the following command: The command removes "spec.autoScaling" from the YAML file, adds "spec.replicas" , and sets "spec.replicas" to the integer value that you specify. 1.8.7.10. Enabling machine health checks on bare metal You can enable machine health checks on bare metal to repair and replace unhealthy managed cluster nodes automatically. You must have additional agent machines that are ready to install in the managed cluster. Consider the following limitations before enabling machine health checks: You cannot modify the MachineHealthCheck object. Machine health checks replace nodes only when at least two nodes stay in the False or Unknown status for more than 8 minutes. After you enable machine health checks for the managed cluster nodes, the MachineHealthCheck object is created in your hosted cluster. To enable machine health checks in your hosted cluster, modify the NodePool resource. Complete the following steps: Verify that the spec.nodeDrainTimeout value in your NodePool resource is greater than 0s . Replace <hosted_cluster_namespace> with the name of your hosted cluster namespace and <nodepool_name> with the node pool name. Run the following command: oc get nodepool -n <hosted_cluster_namespace> <nodepool_name> -o yaml | grep nodeDrainTimeout See the following example output: If the spec.nodeDrainTimeout value is not greater than 0s , modify the value by running the following command: oc patch nodepool -n <hosted_cluster_namespace> <nodepool_name> -p '{"spec":{"nodeDrainTimeout": "30m"}}' --type=merge Enable machine health checks by setting the spec.management.autoRepair field to true in the NodePool resource. Run the following command: oc patch nodepool -n <hosted_cluster_namespace> <nodepool_name> -p '{"spec": {"management": {"autoRepair":true}}}' --type=merge Verify that the NodePool resource is updated with the autoRepair: true value by running the following command: oc get nodepool -n <hosted_cluster_namespace> <nodepool_name> -o yaml | grep autoRepair 1.8.7.10.1. Disabling machine health checks on bare metal To disable machine health checks for the managed cluster nodes, modify the NodePool resource. Complete the following steps: Disable machine health checks by setting the spec.management.autoRepair field to false in the NodePool resource. Run the following command: oc patch nodepool -n <hosted_cluster_namespace> <nodepool_name> -p '{"spec": {"management": {"autoRepair":false}}}' --type=merge Verify that the NodePool resource is updated with the autoRepair: false value by running the following command: oc get nodepool -n <hosted_cluster_namespace> <nodepool_name> -o yaml | grep autoRepair 1.8.7.10.1.1. Additional resources For more information about machine health checks, see Deploying machine health checks . 1.8.7.11. Destroying a hosted cluster on bare metal You can use the console to destroy bare metal hosted clusters. Complete the following steps to destroy a hosted cluster on bare metal: In the console, navigate to Infrastructure > Clusters . On the Clusters page, select the cluster that you want to destroy. In the Actions menu, select Destroy clusters to remove the cluster. 1.8.7.11.1. Destroying a hosted cluster on bare metal by using the command line To destroy a hosted cluster and its managed cluster resource, complete the following step: Delete the hosted cluster and its back-end resources by running the following command: Replace <cluster_name> with the name of your cluster. 1.8.8. Configuring hosted control plane clusters using non bare metal agent machines (Technology Preview) You can deploy hosted control planes by configuring a cluster to function as a hosting cluster. The hosting cluster is the OpenShift Container Platform cluster where the control planes are hosted. The hosting cluster is also known as the management cluster. Note: The management cluster is not the same thing as the managed cluster. A managed cluster is a cluster that the hub cluster manages. The hosted control planes feature is enabled by default. The multicluster engine operator 2.5 supports only the default local-cluster , which is a hub cluster that is managed, and the hub cluster as the hosting cluster. On Red Hat Advanced Cluster Management 2.10, you can use the managed hub cluster, also known as the local-cluster , as the hosting cluster. A hosted cluster is an OpenShift Container Platform cluster with its API endpoint and control plane that are hosted on the hosting cluster. The hosted cluster includes the control plane and its corresponding data plane. You can use the multicluster engine operator console or the hosted control plane command line interface, hcp , to create a hosted cluster. The hosted cluster is automatically imported as a managed cluster. If you want to disable this automatic import feature, see Disabling the automatic import of hosted clusters into multicluster engine operator . Important: Each hosted cluster must have a cluster-wide unique name. A hosted cluster name cannot be the same as any existing managed cluster in order for multicluster engine operator to manage it. Do not use clusters as a hosted cluster name. Run the hub cluster and workers on the same platform for hosted control planes. A hosted cluster cannot be created in the namespace of a multicluster engine operator managed cluster. You can add agent machines as a worker node to a hosted cluster by using the Agent platform. Agent machine represents a host booted with a Discovery Image and ready to be provisioned as an OpenShift Container Platform node. The Agent platform is part of the central infrastructure management service. For more information, see Enabling the central infrastructure management service . All hosts that are not bare metal require a manual boot with a Discovery Image ISO that the central infrastructure management provides. When you create a hosted cluster with the Agent platform, HyperShift installs the Agent Cluster API provider in the hosted control plane namespace. When you scale up the node pool, a machine is created for every replica. For every machine, the Cluster API provider finds and installs an Agent that is approved, is passing validations, is not currently in use, and meets the requirements that are specified in the node pool specification. You can monitor the installation of an Agent by checking its status and conditions. When you scale down a node pool, Agents are unbound from the corresponding cluster. Before you can reuse the Agents, you must restart them by using the Discovery image. When you configure storage for hosted control planes, consider the recommended etcd practices. To ensure that you meet the latency requirements, dedicate a fast storage device to all hosted control plane etcd instances that run on each control-plane node. You can use LVM storage to configure a local storage class for hosted etcd pods. For more information, see Recommended etcd practices and Persistent storage using logical volume manager storage in the OpenShift Container Platform documentation. 1.8.8.1. Prerequisites You must have the following prerequisites to configure a hosting cluster: You need the multicluster engine for Kubernetes operator 2.5 and later installed on an OpenShift Container Platform cluster. The multicluster engine operator is automatically installed when you install Red Hat Advanced Cluster Management. You can also install multicluster engine operator without Red Hat Advanced Cluster Management as an Operator from the OpenShift Container Platform OperatorHub. The multicluster engine operator must have at least one managed OpenShift Container Platform cluster. The local-cluster is automatically imported. See Advanced configuration for more information about the local-cluster . You can check the status of your hub cluster by running the following command: You need to enable central infrastructure management. For more information, see Enabling the central infrastructure management service . You need to install the hosted control plane command line interface . 1.8.8.2. Firewall and port requirements for non bare metal agent machines Ensure that you meet the firewall and port requirements so that ports can communicate between the management cluster, the control plane, and hosted clusters: The kube-apiserver service runs on port 6443 by default and requires ingress access for communication between the control plane components. If you use the NodePort publishing strategy, ensure that the node port that is assigned to the kube-apiserver service is exposed. If you use MetalLB load balancing, allow ingress access to the IP range that is used for load balancer IP addresses. If you use the NodePort publishing strategy, use a firewall rule for the ignition-server and Oauth-server settings. The konnectivity agent, which establishes a reverse tunnel to allow bi-directional communication on the hosted cluster, requires egress access to the cluster API server address on port 6443. With that egress access, the agent can reach the kube-apiserver service. If the cluster API server address is an internal IP address, allow access from the workload subnets to the IP address on port 6443. If the address is an external IP address, allow egress on port 6443 to that external IP address from the nodes. If you change the default port of 6443, adjust the rules to reflect that change. Ensure that you open any ports that are required by the workloads that run in the clusters. Use firewall rules, security groups, or other access controls to restrict access to only required sources. Avoid exposing ports publicly unless necessary. For production deployments, use a load balancer to simplify access through a single IP address. 1.8.8.3. Infrastructure requirements for non bare metal agent machines The Agent platform does not create any infrastructure, but it does have the following requirements for infrastructure: Agents: An Agent represents a host that is booted with a discovery image and is ready to be provisioned as an OpenShift Container Platform node. DNS: The API and ingress endpoints must be routable. 1.8.8.4. Configuring DNS on non bare metal agent machines The API Server for the hosted cluster is exposed as a NodePort service. A DNS entry must exist for api.<hosted-cluster-name>.<basedomain> that points to destination where the API Server can be reached. The DNS entry can be as simple as a record that points to one of the nodes in the managed cluster that is running the hosted control plane. The entry can also point to a load balancer that is deployed to redirect incoming traffic to the ingress pods. See the following example DNS configuration: If you are configuring DNS for a disconnected environment on an IPv6 network, see the following example DNS configuration: If you are configuring DNS for a disconnected environment on a dual stack network, be sure to include DNS entries for both IPv4 and IPv6. See the following example DNS configuration: 1.8.8.5. Creating a hosted cluster on non bare metal agent machines You can create a hosted cluster or import one. For instructions to import a hosted cluster, see Importing a hosted cluster . Create the hosted control plane namespace by entering the following command: Replace <hosted-cluster-namespace> with your hosted cluster namespace name, for example, clusters . Replace <hosted-cluster-name> with your hosted cluster name. Verify that you have a default storage class configured for your cluster. Otherwise, you might end up with pending PVCs. Enter the following commands, replacing any example variables with your information: 1 Specify the name of your hosted cluster, for instance, example . 2 Specify the path to your pull secret, for example, /user/name/pullsecret . 3 Specify your hosted control plane namespace, for example, clusters-example . Ensure that agents are available in this namespace by using the oc get agent -n <hosted-control-plane-namespace> command. 4 Specify your base domain, for example, krnl.es . 5 The --api-server-address flag defines the IP address that is used for the Kubernetes API communication in the hosted cluster. If you do not set the --api-server-address flag, you must log in to connect to the management cluster. 6 Specify the etcd storage class name, for example, lvm-storageclass . 7 Specify the path to your SSH public key. The default file path is ~/.ssh/id_rsa.pub . 8 Specify your hosted cluster namespace. 9 Specify the supported OpenShift Container Platform version that you want to use, for example, 4.14.0-x86_64 . After a few moments, verify that your hosted control plane pods are up and running by entering the following command: See the following example output: 1.8.8.5.1. Creating a hosted cluster on non bare metal agent machines by using the console Open the OpenShift Container Platform web console and log in by entering your administrator credentials. For instructions to open the console, see Accessing the web console in the OpenShift Container Platform documentation. In the console header, ensure that All Clusters is selected. Click Infrastructure > Clusters . Click Create cluster Host inventory > Hosted control plane . The Create cluster page is displayed. On the Create cluster page, follow the prompts to enter details about the cluster, node pools, networking, and automation. Note: As you enter details about the cluster, you might find the following tips useful: If you want to use predefined values to automatically populate fields in the console, you can create a host inventory credential. For more information, see Creating a credential for an on-premises environment . On the Cluster details page, the pull secret is your OpenShift Container Platform pull secret that you use to access OpenShift Container Platform resources. If you selected a host inventory credential, the pull secret is automatically populated. On the Node pools page, the namespace contains the hosts for the node pool. If you created a host inventory by using the console, the console creates a dedicated namespace. On the Networking page, you select an API server publishing strategy. The API server for the hosted cluster can be exposed either by using an existing load balancer or as a service of the NodePort type. A DNS entry must exist for the api.USD{HOSTED_CLUSTER_NAME}.USD{BASEDOMAIN} setting that points to the destination where the API server can be reached. This entry can be a record that points to one of the nodes in the management cluster or a record that points to a load balancer that redirects incoming traffic to the Ingress pods. Review your entries and click Create . The Hosted cluster view is displayed. Monitor the deployment of the hosted cluster in the Hosted cluster view. If you do not see information about the hosted cluster, ensure that All Clusters is selected, and click the cluster name. Wait until the control plane components are ready. This process can take a few minutes. To view the node pool status, scroll to the NodePool section. The process to install the nodes takes about 10 minutes. You can also click Nodes to confirm whether the nodes joined the hosted cluster. 1.8.8.5.2. Additional resources To create credentials that you can reuse when you create a hosted cluster with the console, see Creating a credential for an on-premises environment . To import a hosted cluster, see Manually importing a hosted control plane cluster . To access a hosted cluster, see Accessing the hosted cluster . To add hosts to the host inventory by using the Discovery Image, see Adding hosts to the host inventory by using the Discovery Image . 1.8.8.6. Verifying hosted cluster creation After the deployment process is complete, you can verify that the hosted cluster was created successfully. Follow these steps a few minutes after you create the hosted cluster. Obtain the kubeconfig for your new hosted cluster by entering the extract command: Use the kubeconfig to view the cluster Operators of the hosted cluster. Enter the following command: See the following example output: You can also view the running pods on your hosted cluster by entering the following command: See the following example output: 1.8.8.7. Scaling the NodePool object for a hosted cluster You add nodes to your hosted cluster by scaling the NodePool object. Scale the NodePool object to two nodes: The Cluster API agent provider randomly picks two agents that are then assigned to the hosted cluster. Those agents go through different states and finally join the hosted cluster as OpenShift Container Platform nodes. The agents pass through states in the following order: binding discovering insufficient installing installing-in-progress added-to-existing-cluster Enter the following command: See the following example output: Enter the following command: See the following example output: Obtain the kubeconfig for your new hosted cluster by entering the extract command: After the agents reach the added-to-existing-cluster state, verify that you can see the OpenShift Container Platform nodes in the hosted cluster by entering the following command: See the following example output: Cluster Operators start to reconcile by adding workloads to the nodes. Enter the following command to verify that two machines were created when you scaled up the NodePool object: See the following example output: The clusterversion reconcile process eventually reaches a point where only Ingress and Console cluster operators are missing. Enter the following command: See the following example output: 1.8.8.7.1. Adding node pools You can create node pools for a hosted cluster by specifying a name, number of replicas, and any additional information, such as an agent label selector. To create a node pool, enter the following information: 1 The --agentLabelSelector is optional. The node pool uses agents with the "size" : "medium" label. Check the status of the node pool by listing nodepool resources in the clusters namespace: Extract the admin-kubeconfig secret by entering the following command: See the following example output: After some time, you can check the status of the node pool by entering the following command: Verify that the number of available node pools match with the number of expected node pools by entering this command: 1.8.8.7.2. Additional resources To scale down the data plane to zero, see Scaling down the data plane to zero . 1.8.8.8. Handling ingress in a hosted cluster on non bare metal agent machines Every OpenShift Container Platform cluster has a default application Ingress Controller that typically has an external DNS record associated with it. For example, if you create a hosted cluster named example with the base domain krnl.es , you can expect the wildcard domain *.apps.example.krnl.es to be routable. To set up a load balancer and wildcard DNS record for the *.apps domain, perform the following actions on your guest cluster: Deploy MetalLB by creating a YAML file that contains the configuration for the MetalLB Operator: apiVersion: v1 kind: Namespace metadata: name: metallb labels: openshift.io/cluster-monitoring: "true" annotations: workload.openshift.io/allowed: management --- apiVersion: operators.coreos.com/v1 kind: OperatorGroup metadata: name: metallb-operator-operatorgroup namespace: metallb --- apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: name: metallb-operator namespace: metallb spec: channel: "stable" name: metallb-operator source: redhat-operators sourceNamespace: openshift-marketplace Save the file as metallb-operator-config.yaml . Enter the following command to apply the configuration: oc apply -f metallb-operator-config.yaml After the Operator is running, create the MetalLB instance: Create a YAML file that contains the configuration for the MetalLB instance: apiVersion: metallb.io/v1beta1 kind: MetalLB metadata: name: metallb namespace: metallb Save the file as metallb-instance-config.yaml . Create the MetalLB instance by entering this command: oc apply -f metallb-instance-config.yaml Configure the MetalLB Operator by creating two resources: An IPAddressPool resource with a single IP address. This IP address must be on the same subnet as the network that the cluster nodes use. A BGPAdvertisement resource to advertise the load balancer IP addresses that the IPAddressPool resource provides through the BGP protocol. Create a YAML file to contain the configuration: apiVersion: metallb.io/v1beta1 kind: IPAddressPool metadata: name: <ip_address_pool_name> 1 namespace: metallb spec: protocol: layer2 autoAssign: false addresses: - <ingress_ip>-<ingress_ip> 2 --- apiVersion: metallb.io/v1beta1 kind: BGPAdvertisement metadata: name: <bgp_advertisement_name> 3 namespace: metallb spec: ipAddressPools: - <ip_address_pool_name> 4 1 4 Specify the IPAddressPool resource name. 2 Specify the IP address for your environment, for example, 192.168.122.23 . 3 Specify the BGPAdvertisement resource name. Save the file as ipaddresspool-bgpadvertisement-config.yaml . Create the resources by entering the following command: After creating a service of the LoadBalancer type, MetalLB adds an external IP address for the service. Configure a new load balancer service that routes ingress traffic to the ingress deployment by creating a YAML file named metallb-loadbalancer-service.yaml : kind: Service apiVersion: v1 metadata: annotations: metallb.universe.tf/address-pool: ingress-public-ip name: metallb-ingress namespace: openshift-ingress spec: ports: - name: http protocol: TCP port: 80 targetPort: 80 - name: https protocol: TCP port: 443 targetPort: 443 selector: ingresscontroller.operator.openshift.io/deployment-ingresscontroller: default type: LoadBalancer Save the file as metallb-loadbalancer-service.yaml . Enter the following command to apply the YAML configuration: oc apply -f metallb-loadbalancer-service.yaml Enter the following command to reach the OpenShift Container Platform console: curl -kI https://console-openshift-console.apps.example.krnl.es HTTP/1.1 200 OK Check the clusterversion and clusteroperator values to verify that everything is running. Enter the following command: oc --kubeconfig <hosted_cluster_name>.kubeconfig get clusterversion,co See the following example output: + Replace 4.x.y with the supported OpenShift Container Platform version that you want to use, for example, 4.14.0-x86_64 . 1.8.8.8.1. Additional resources For more information about MetalLB, see About MetalLB and the MetalLB Operator in the OpenShift Container Platform documentation. 1.8.8.9. Enabling node auto-scaling for the hosted cluster When you need more capacity in your hosted cluster and spare agents are available, you can enable auto-scaling to install new worker nodes. To enable auto-scaling, enter the following command. In this example, the minimum number of nodes is 2, and the maximum is 5. The maximum number of nodes that you can add might be bound by your platform. For example, if you use the Agent platform, the maximum number of nodes is bound by the number of available agents: Create a workload that requires a new node. Create a YAML file that contains the workload configuration by using in the following example: apiVersion: apps/v1 kind: Deployment metadata: creationTimestamp: null labels: app: reversewords name: reversewords namespace: default spec: replicas: 40 selector: matchLabels: app: reversewords strategy: {} template: metadata: creationTimestamp: null labels: app: reversewords spec: containers: - image: quay.io/mavazque/reversewords:latest name: reversewords resources: requests: memory: 2Gi status: {} Save the file as workload-config.yaml . Apply the YAML by entering the following command: Extract the admin-kubeconfig secret by entering the following command: See the following example output: You can check if new nodes are in the Ready status by entering the following command: To remove the node, delete the workload by entering the following command: Wait for several minutes to pass without requiring the additional capacity. On the Agent platform, the agent is decommissioned and can be reused. You can confirm that the node was removed by entering the following command: 1.8.8.9.1. Disabling node auto-scaling for the hosted cluster To disable node auto-scaling, enter the following command: The command removes "spec.autoScaling" from the YAML file, adds "spec.replicas" , and sets "spec.replicas" to the integer value that you specify. 1.8.8.10. Enabling machine health checks on non bare metal agent machines You can enable machine health checks on non bare metal agent machines to repair and replace unhealthy managed cluster nodes automatically. You must have additional non bare metal agent machines that are ready to install in the managed cluster. Consider the following limitations before enabling machine health checks: You cannot modify the MachineHealthCheck object. Machine health checks replace nodes only when at least two nodes stay in the False or Unknown status for more than 8 minutes. After you enable machine health checks for the managed cluster nodes, the MachineHealthCheck object is created in your hosted cluster. To enable machine health checks in your hosted cluster, modify the NodePool resource. Complete the following steps: Verify that the spec.nodeDrainTimeout value in your NodePool resource is greater than 0s . Replace <hosted_cluster_namespace> with the name of your hosted cluster namespace and <nodepool_name> with the node pool name. Run the following command: oc get nodepool -n <hosted_cluster_namespace> <nodepool_name> -o yaml | grep nodeDrainTimeout See the following example output: If the spec.nodeDrainTimeout value is not greater than 0s , modify the value by running the following command: oc patch nodepool -n <hosted_cluster_namespace> <nodepool_name> -p '{"spec":{"nodeDrainTimeout": "30m"}}' --type=merge Enable machine health checks by setting the spec.management.autoRepair field to true in the NodePool resource. Run the following command: oc patch nodepool -n <hosted_cluster_namespace> <nodepool_name> -p '{"spec": {"management": {"autoRepair":true}}}' --type=merge Verify that the NodePool resource is updated with the autoRepair: true value by running the following command: oc get nodepool -n <hosted_cluster_namespace> <nodepool_name> -o yaml | grep autoRepair 1.8.8.10.1. Disabling machine health checks on non bare metal agent machines To disable machine health checks for managed cluster nodes, modify the NodePool resource. Complete the following steps: Disable machine health checks by setting the spec.management.autoRepair field to false in the NodePool resource. Run the following command: oc patch nodepool -n <hosted_cluster_namespace> <nodepool_name> -p '{"spec": {"management": {"autoRepair":false}}}' --type=merge Verify that the NodePool resource is updated with the autoRepair: false value by running the following command: oc get nodepool -n <hosted_cluster_namespace> <nodepool_name> -o yaml | grep autoRepair 1.8.8.10.1.1. Additional resources For more information about machine health checks, see Deploying machine health checks . 1.8.8.11. Destroying a hosted cluster on non bare metal agent machines You can use the console to destroy non bare metal hosted clusters. Complete the following steps to destroy a hosted cluster on non bare metal agent machines: In the console, navigate to Infrastructure > Clusters . On the Clusters page, select the cluster that you want to destroy. In the Actions menu, select Destroy clusters to remove the cluster. 1.8.8.11.1. Destroying a hosted cluster on non bare metal agent machines by using the command line To destroy a hosted cluster, complete the following step: Delete the hosted cluster and its back-end resources by running the following command: Replace <hosted_cluster_name> with the name of your cluster. 1.8.9. Configuring the hosting cluster on a 64-bit x86 OpenShift Container Platform cluster to create hosted control planes for IBM Power compute nodes (Technology Preview) Technology Preview: Configuring the hosting cluster on 64-bit x86 bare metal for IBM Power ( ppc64le ) compute nodes has limited support. You can deploy hosted control planes by configuring a cluster to function as a hosting cluster. The hosting cluster is the OpenShift Container Platform cluster where the control planes are hosted. The hosting cluster is also known as the management cluster. Note: The management cluster is not the managed cluster. A managed cluster is a cluster that the hub cluster manages. The multicluster engine operator 2.5 supports only the default local-cluster , which is a hub cluster that is managed, and the hub cluster as the hosting cluster. Important: To provision hosted control planes on bare metal, you can use the Agent platform. The Agent platform uses the central infrastructure management service to add worker nodes to a hosted cluster. For an introduction to the central infrastructure management service, see Creating a host inventory . Each IBM Power system host must be started with a Discovery Image that the central infrastructure management provides. After each host starts, it runs an Agent process to discover the details of the host and completes the installation. An Agent custom resource represents each host. When you create a hosted cluster with the Agent platform, HyperShift installs the Agent Cluster API provider in the hosted control plane namespace. When you scale up a node pool, a machine is created. The Cluster API provider finds an Agent that is approved, is passing validations, is not currently in use, and meets the requirements that are specified in the node pool specification. You can monitor the installation of an Agent by checking its status and conditions. When you scale down a node pool, Agents are unbound from the corresponding cluster. Before you can reuse the clusters, you must restart them by using the Discovery image to update the number of nodes. 1.8.9.1. Prerequisites You must have the following prerequisites to configure a hosting cluster: multicluster engine for Kubernetes operator 2.5 and later installed on an OpenShift Container Platform cluster. The multicluster engine operator is automatically installed when you install Red Hat Advanced Cluster Management. You can also install multicluster engine operator without Red Hat Advanced Cluster Management as an Operator from the OpenShift Container Platform OperatorHub. The multicluster engine operator must have at least one managed OpenShift Container Platform cluster. The local-cluster is automatically imported in multicluster engine operator 2.5 and later. See Advanced configuration for more information about the local-cluster . You can check the status of your hub cluster by running the following command: oc get managedclusters local-cluster You need a hosting cluster with at least 3 worker nodes to run the HyperShift Operator. You need to enable the central infrastructure management service. For more information, see Enabling the central infrastructure management service . You need to install the hosted control plane command line interface. See Installing the hosted control plane command line interface . 1.8.9.2. IBM Power infrastructure requirements The Agent platform does not create any infrastructure, but requires the following for infrastructure: Agents: An Agent represents a host that is booted with a discovery image and is ready to be provisioned as an OpenShift Container Platform node. DNS: The API and ingress endpoints must be routable. 1.8.9.3. IBM Power configuration documentation After you meet the prerequisites, see the following topics to configure hosted control planes on bare metal: Adding agents to the InfraEnv resource Configuring DNS for hosted control planes on IBM Power Creating a hosted cluster on bare metal Creating an InfraEnv resource for hosted control planes on 64-bit x86 bare metal for IBM Power compute nodes Scaling the NodePool object for a hosted cluster on IBM Power . 1.8.9.4. Adding agents to the InfraEnv resource You can add agents by manually configuring the machine to start with the live ISO. Download the live ISO and use it to start a host (bare metal or VM). The URL for the live ISO can be found in the InfraEnv resource, in the status.isoDownloadURL field. At startup, the host communicates with the Assisted Service and registers as an agent in the same namespace as the InfraEnv resource. To list the agents and some of their properties, enter the following command: See the following example output: After each agent is created, you can optionally set its installation_disk_id and hostname in the specification and approve the agent by entering the following commands: To verify that the agents are approved for use, enter the following command and check the output: See the following example output: 1.8.9.5. Configuring DNS for hosted control planes on IBM Power The API server for the hosted cluster is exposed. A DNS entry must exist for the api.<hosted-cluster-name>.<base-domain> entry that points to the destination where the API server is reachable. The DNS entry can be as simple as a record that points to one of the nodes in the managed cluster that is running the hosted control plane. The entry can also point to a load balancer that is deployed to redirect incoming traffic to the ingress pods. See the following example of DNS configuration: USD cat /var/named/<example.krnl.es.zone> See the following example output: USD TTL 900 @ IN SOA bastion.example.krnl.es.com. hostmaster.example.krnl.es.com. ( 2019062002 1D 1H 1W 3H ) IN NS bastion.example.krnl.es.com. ; ; api IN A 1xx.2x.2xx.1xx 1 api-int IN A 1xx.2x.2xx.1xx ; ; *.apps.<hosted-cluster-name>.<basedomain> IN A 1xx.2x.2xx.1xx ; ;EOF 1 The record refers to the IP address of the API load balancer that handles ingress and egress traffic for hosted control planes. For IBM Power, add IP addresses that correspond to the IP address of the agent. compute-0 IN A 1xx.2x.2xx.1yy compute-1 IN A 1xx.2x.2xx.1yy 1.8.9.6. Creating an InfraEnv resource for hosted control planes on 64-bit x86 bare metal for IBM Power compute nodes An InfraEnv is a environment where hosts that are starting the live ISO can join as agents. In this case, the agents are created in the same namespace as your hosted control plane. To create an InfraEnv resource, complete the following steps: Create a YAML file to contain the configuration. See the following example: apiVersion: agent-install.openshift.io/v1beta1 kind: InfraEnv metadata: name: <hosted-cluster-name> namespace: <hosted-control-plane-namespace> spec: cpuArchitecture: ppc64le pullSecretRef: name: pull-secret sshAuthorizedKey: <ssh-public-key> Save the file as infraenv-config.yaml . Apply the configuration by entering the following command: To fetch the URL to download the live ISO, which allows IBM Power machines to join as agents, enter the following command: 1.8.9.7. Scaling the NodePool object for a hosted cluster on IBM Power The NodePool object is created when you create a hosted cluster. By scaling the NodePool object, you can add more compute nodes to the hosted control plane. Run the following command to scale the NodePool object to two nodes: oc -n <clusters_namespace> scale nodepool <nodepool_name> --replicas 2 The Cluster API agent provider randomly picks two agents that are then assigned to the hosted cluster. Those agents go through different states and finally join the hosted cluster as OpenShift Container Platform nodes. The agents pass through the transition phases in the following order: binding discovering insufficient installing installing-in-progress added-to-existing-cluster Run the following command to see the status of a specific scaled agent: oc -n <hosted_control_plane_namespace> get agent -o jsonpath='{range .items[*]}BMH: {@.metadata.labels.agent-install\.openshift\.io/bmh} Agent: {@.metadata.name} State: {@.status.debugInfo.state}{"\n"}{end}' See the following output: BMH: Agent: 50c23cda-cedc-9bbd-bcf1-9b3a5c75804d State: known-unbound BMH: Agent: 5e498cd3-542c-e54f-0c58-ed43e28b568a State: insufficient Run the following command to see the transition phases: oc -n <hosted_control_plane_namespace> get agent See the following output: NAME CLUSTER APPROVED ROLE STAGE 50c23cda-cedc-9bbd-bcf1-9b3a5c75804d hosted-forwarder true auto-assign 5e498cd3-542c-e54f-0c58-ed43e28b568a true auto-assign da503cf1-a347-44f2-875c-4960ddb04091 hosted-forwarder true auto-assign Run the following command to generate the kubeconfig file to access the hosted cluster: hcp create kubeconfig --namespace <clusters_namespace> --name <hosted_cluster_namespace> > <hosted_cluster_name>.kubeconfig After the agents reach the added-to-existing-cluster state, verify that you can see the OpenShift Container Platform nodes by entering the following command: oc --kubeconfig <hosted_cluster_name>.kubeconfig get nodes See the following output: NAME STATUS ROLES AGE VERSION worker-zvm-0.hostedn.example.com Ready worker 5m41s v1.24.0+3882f8f worker-zvm-1.hostedn.example.com Ready worker 6m3s v1.24.0+3882f8f Enter the following command to verify that two machines were created when you scaled up the NodePool object: oc -n <hosted_control_plane_namespace> get machine.cluster.x-k8s.io See the following output: NAME CLUSTER NODENAME PROVIDERID PHASE AGE VERSION hosted-forwarder-79558597ff-5tbqp hosted-forwarder-crqq5 worker-zvm-0.hostedn.example.com agent://50c23cda-cedc-9bbd-bcf1-9b3a5c75804d Running 41h 4.15.0 hosted-forwarder-79558597ff-lfjfk hosted-forwarder-crqq5 worker-zvm-1.hostedn.example.com agent://5e498cd3-542c-e54f-0c58-ed43e28b568a Running 41h 4.15.0 Run the following command to check the cluster version and cluster operator status: oc --kubeconfig <hosted_cluster_name>.kubeconfig get clusterversion See the following output: NAME VERSION AVAILABLE PROGRESSING SINCE STATUS clusterversion.config.openshift.io/version 4.15.0 True False 40h Cluster version is 4.15.0 Run the following command to check the cluster operator status: oc --kubeconfig <hosted_cluster_name>.kubeconfig get clusteroperators For each component of your cluster, the output shows the following cluster operator statuses: NAME , VERSION , AVAILABLE , PROGRESSING , DEGRADED , SINCE , and MESSAGE . For an output example, see the Initial Operator configuration section in the OpenShift Container Platform documentation. 1.8.9.7.1. Additional resources To scale down the data plane to zero, see Scaling down the data plane to zero . 1.8.10. Configuring the hosting cluster on x86 bare metal for IBM Z compute nodes (Technology Preview) Technology Preview: Configuring the hosting cluster on x86 bare metal for IBM Z ( s390x ) compute nodes is in a Technology Preview status with limited support. You can deploy hosted control planes by configuring a cluster to function as a hosting cluster. The hosting cluster is the OpenShift Container Platform cluster where the control planes are hosted. The hosting cluster is also known as the management cluster. Note: The management cluster is not the managed cluster. A managed cluster is a cluster that the hub cluster manages. You can convert a managed cluster to a hosting cluster by using the hypershift add-on to deploy the HyperShift Operator on that cluster. Then, you can start to create the hosted cluster. The multicluster engine operator 2.5 supports only the default local-cluster , which is a hub cluster that is managed, and the hub cluster as the hosting cluster. Important: To provision hosted control planes on bare metal, you can use the Agent platform. The Agent platform uses the central infrastructure management service to add worker nodes to a hosted cluster. For an introduction to the central infrastructure management service, see Kube API - Getting Started Guide . Each IBM Z system host must be started with the PXE images provided by the central infrastructure management. After each host starts, it runs an Agent process to discover the details of the host and completes the installation. An Agent custom resource represents each host. When you create a hosted cluster with the Agent platform, HyperShift Operator installs the Agent Cluster API provider in the hosted control plane namespace. When you scale up a node pool, a machine is created. The Cluster API provider finds an Agent that is approved, is passing validations, is not currently in use, and meets the requirements that are specified in the node pool specification. You can monitor the installation of an Agent by checking its status and conditions. When you scale down a node pool, Agents are unbound from the corresponding cluster. Before you reuse the clusters, you must boot the clusters by using the PXE image to update the number of nodes. 1.8.10.1. Prerequisites multicluster engine for Kubernetes operator version 2.5 or later must be installed on an OpenShift Container Platform cluster. The multicluster engine operator is installed automatically when you install Red Hat Advanced Cluster Management. You can also install multicluster engine operator without Red Hat Advanced Cluster Management as an Operator from the OpenShift Container Platform OperatorHub. The multicluster engine operator must have at least one managed OpenShift Container Platform cluster. The local-cluster is automatically imported in multicluster engine operator 2.5 and later. See Advanced configuration for more information about the local-cluster . You can check the status of your hub cluster by running the following command: oc get managedclusters local-cluster You need a hosting cluster with at least three worker nodes to run the HyperShift Operator. You need to enable the central infrastructure management service. For more information, see Enabling the central infrastructure management service . You need to install the hosted control plane command line interface. See Installing the hosted control plane command line interface . 1.8.10.2. IBM Z infrastructure requirements The Agent platform does not create any infrastructure, but requires the following for infrastructure: Agents: An Agent represents a host that is booted with a discovery image, or PXE image and is ready to be provisioned as an OpenShift Container Platform node. DNS: The API and Ingress endpoints must be routable. The hosted control planes feature is enabled by default. If you disabled the feature and want to manually enable it, or if you need to disable the feature, see Enabling or disabling the hosted control planes feature . 1.8.10.3. IBM Z configuration documentation After you meet the prerequisites, see the following topics to configure hosted control planes on bare metal: Configuring DNS for hosted control planes with IBM Z Creating a hosted cluster on bare metal Creating an InfraEnv resource for hosted control planes on 64-bit x86 bare metal for IBM Z compute nodes Adding IBM Z agents to the InfraEnv resource (Technology Preview) Scaling the NodePool object for a hosted cluster on IBM Z 1.8.10.4. Adding IBM Z agents to the InfraEnv resource (Technology Preview) To attach compute nodes to a hosted control plane, create agents that help you to scale the node pool. Adding agents in an IBM Z environment requires additional steps, which are described in detail in this section. Note: Unless stated otherwise, these procedures apply to both z/VM and RHEL KVM installations on IBM Z and IBM LinuxONE. 1.8.10.4.1. Adding IBM Z KVM as agents For IBM Z with KVM, run the following command to start your IBM Z environment with the downloaded PXE images from the InfraEnv resource. After the Agents are created, the host communicates with the Assisted Service and registers in the same namespace as the InfraEnv resource on the management cluster. virt-install \ --name "<vm_name>" \ 1 --autostart \ --ram=16384 \ --cpu host \ --vcpus=4 \ --location "<path_to_kernel_initrd_image>,kernel=kernel.img,initrd=initrd.img" \ 2 --disk <qcow_image_path> \ 3 --network network:macvtap-net,mac=<mac_address> \ 4 --graphics none \ --noautoconsole \ --wait=-1 --extra-args "rd.neednet=1 nameserver=<nameserver> coreos.live.rootfs_url=http://<http_server>/rootfs.img random.trust_cpu=on rd.luks.options=discard ignition.firstboot ignition.platform.id=metal console=tty1 console=ttyS1,115200n8 coreos.inst.persistent-kargs=console=tty1 console=ttyS1,115200n8" 5 1 Specify the name of the virtual machine. 2 Specify the location of the kernel_initrd_image file. 3 Specify the disk image path. 4 Specify the Mac address. 5 Specify the server name of the agents. For ISO boot, download ISO from the InfraEnv resource and boot the nodes by running the following command: virt-install \ --name "<vm_name>" \ 1 --autostart \ --memory=16384 \ --cpu host \ --vcpus=4 \ --network network:macvtap-net,mac=<mac_address> \ 2 --cdrom "<path_to_image.iso>" \ 3 --disk <qcow_image_path> \ --graphics none \ --noautoconsole \ --os-variant <os_version> \ 4 --wait=-1 1 Specify the name of the virtual machine. 2 Specify the Mac address. 3 Specify the location of the image.iso file. 4 Specify the pperating system version that you are using. 1.8.10.4.2. Adding IBM Z LPAR as agents With OpenShift Container Platform 4.16, you can add the Logical Partition (LPAR) on IBM Z or IBM LinuxONE as a compute node to a hosted control plane. Create a boot parameter file for the agents. Example parameter file rd.neednet=1 cio_ignore=all,!condev \ console=ttysclp0 \ ignition.firstboot ignition.platform.id=metal coreos.live.rootfs_url=http://<http_server>/rhcos-<version>-live-rootfs.<architecture>.img \ 1 coreos.inst.persistent-kargs=console=ttysclp0 ip=<ip>::<gateway>:<netmask>:<hostname>::none nameserver=<dns> \ 2 rd.znet=qeth,<network_adaptor_range>,layer2=1 rd.<disk_type>=<adapter> \ 3 zfcp.allow_lun_scan=0 ai.ip_cfg_override=1 \ 4 random.trust_cpu=on rd.luks.options=discard 1 For the coreos.live.rootfs_url artifact, specify the matching rootfs artifact for the kernel and initramfs that you are starting. Only HTTP and HTTPS protocols are supported. 2 For the ip parameter, manually assign the IP address, as described in Installing a cluster with z/VM on IBM Z and IBM LinuxONE . 3 For installations on DASD-type disks, use rd.dasd to specify the DASD where Red Hat Enterprise Linux CoreOS (RHCOS) is to be installed. For installations on FCP-type disks, use rd.zfcp=<adapter>,<wwpn>,<lun> to specify the FCP disk where RHCOS is to be installed. 4 Specify this parameter when you use an Open Systems Adapter (OSA) or HiperSockets. Generate the .ins and initrd.img.addrsize files. The .ins file includes installation data and is on the FTP server. You can access the file from the HMC system. The .ins file contains details such as mapping of the location of installation data on the disk or FTP server, the memory locations where the data is to be copied. Note : In OpenShift Container Platform 4.16, the .ins file and initrd.img.addrsize are not automatically generated as part of boot-artifacts from the installer. You must manually generate these files. Run the following commands to get the size of the kernel and initrd : KERNEL_IMG_PATH='./kernel.img' INITRD_IMG_PATH='./initrd.img' CMDLINE_PATH='./generic.prm' kernel_size=USD(stat -c%s USDKERNEL_IMG_PATH ) initrd_size=USD(stat -c%s USDINITRD_IMG_PATH) Round the kernel size up to the MiB boundary. This value is the starting address of initrd.img . offset=USD(( (kernel_size + 1048575) / 1048576 * 1048576 )) Create the kernel binary patch file that contains the initrd address and size by running the following commands: INITRD_IMG_NAME=USD(echo USDINITRD_IMG_PATH | rev | cut -d '/' -f 1 | rev) KERNEL_OFFSET=0x00000000 KERNEL_CMDLINE_OFFSET=0x00010480 INITRD_ADDR_SIZE_OFFSET=0x00010408 OFFSET_HEX=USD(printf '0x%08x\n' USDoffset) Convert the address and size to binary format by running the following commands: printf "USD(printf '%016x\n' USDinitrd_size)" | xxd -r -p > temp_size.bin Merge the address and size binaries by running the following command: cat temp_address.bin temp_size.bin > "USDINITRD_IMG_NAME.addrsize" Clean up temporary files by running the following command: rm -rf temp_address.bin temp_size.bin Create the .ins file. The file is based on the paths of the kernel.img , initrd.img , initrd.img.addrsize , and cmdline files and the memory locations where the data is to be copied. USDKERNEL_IMG_PATH USDKERNEL_OFFSET USDINITRD_IMG_PATH USDOFFSET_HEX USDINITRD_IMG_NAME.addrsize USDINITRD_ADDR_SIZE_OFFSET USDCMDLINE_PATH USDKERNEL_CMDLINE_OFFSET Transfer the initrd , kernel , generic.ins , and initrd.img.addrsize parameter files to the file server. For more information about how to transfer the files with FTP and boot, see Installing in an LPAR . Start the machine. Repeat the procedure for all other machines in the cluster. 1.8.10.4.3. Adding IBM z/VM as agents If you want to use a static IP for z/VM guest, you must configure the NMStateConfig attribute for the z/VM agent so that the IP parameter persists in the second start. Complete the following steps to start your IBM Z environment with the downloaded PXE images from the InfraEnv resource. After the Agents are created, the host communicates with the Assisted Service and registers in the same namespace as the InfraEnv resource on the management cluster. Update the parameter file to add the rootfs_url , network_adaptor and disk_type values. Example parameter file rd.neednet=1 cio_ignore=all,!condev \ console=ttysclp0 \ ignition.firstboot ignition.platform.id=metal \ coreos.live.rootfs_url=http://<http_server>/rhcos-<version>-live-rootfs.<architecture>.img \ 1 coreos.inst.persistent-kargs=console=ttysclp0 ip=<ip>::<gateway>:<netmask>:<hostname>::none nameserver=<dns> \ 2 rd.znet=qeth,<network_adaptor_range>,layer2=1 rd.<disk_type>=<adapter> \ 3 zfcp.allow_lun_scan=0 ai.ip_cfg_override=1 \ 4 1 For the coreos.live.rootfs_url artifact, specify the matching rootfs artifact for the kernel and initramfs that you are starting. Only HTTP and HTTPS protocols are supported. 2 For the ip parameter, manually assign the IP address, as described in Installing a cluster with z/VM on IBM Z and IBM LinuxONE . 3 For installations on DASD-type disks, use rd.dasd to specify the DASD where Red Hat Enterprise Linux CoreOS (RHCOS) is to be installed. For installations on FCP-type disks, use rd.zfcp=<adapter>,<wwpn>,<lun> to specify the FCP disk where RHCOS is to be installed. 4 Specify this parameter when you use an Open Systems Adapter (OSA) or HiperSockets. Move initrd , kernel images, and the parameter file to the guest VM by running the following commands: vmur pun -r -u -N kernel.img USDINSTALLERKERNELLOCATION/<image name> vmur pun -r -u -N generic.parm USDPARMFILELOCATION/paramfilename vmur pun -r -u -N initrd.img USDINSTALLERINITRAMFSLOCATION/<image name> Run the following command from the guest VM console: cp ipl c To list the agents and their properties, enter the following command: oc -n <hosted_control_plane_namespace> get agents See the following example output: NAME CLUSTER APPROVED ROLE STAGE 50c23cda-cedc-9bbd-bcf1-9b3a5c75804d auto-assign 5e498cd3-542c-e54f-0c58-ed43e28b568a auto-assign Run the following command to approve the agent. Optional: You can set the agent ID <installation_disk_id> and <hostname> in the specification. oc -n <hosted_control_plane_namespace> patch agent 50c23cda-cedc-9bbd-bcf1-9b3a5c75804d -p '{"spec":{"installation_disk_id":"/dev/sda","approved":true,"hostname":"worker-zvm-0.hostedn.example.com"}}' --type merge Run the following command to verify that the agents are approved: oc -n <hosted_control_plane_namespace> get agents Example output NAME CLUSTER APPROVED ROLE STAGE 50c23cda-cedc-9bbd-bcf1-9b3a5c75804d true auto-assign 5e498cd3-542c-e54f-0c58-ed43e28b568a true auto-assign 1.8.10.5. Configuring DNS for hosted control plane with IBM Z The API server for the hosted cluster is exposed as a 'NodePort' service. A DNS entry must exist for the api.<hosted-cluster-name>.<base-domain> that points to the destination where the API server is reachable. The DNS entry can be as simple as a record that points to one of the nodes in the managed cluster that is running the hosted control plane. The entry can also point to a load balancer deployed to redirect incoming traffic to the Ingress pods. See the following example of DNS configuration: USD cat /var/named/<example.krnl.es.zone> See the following example output: USD TTL 900 @ IN SOA bastion.example.krnl.es.com. hostmaster.example.krnl.es.com. ( 2019062002 1D 1H 1W 3H ) IN NS bastion.example.krnl.es.com. ; ; api IN A 1xx.2x.2xx.1xx 1 api-int IN A 1xx.2x.2xx.1xx ; ; *.apps IN A 1xx.2x.2xx.1xx ; ;EOF 1 The record refers to the IP address of the API load balancer that handles ingress and egress traffic for hosted control planes. For IBM z/VM, add IP addresses that correspond to the IP address of the agent. compute-0 IN A 1xx.2x.2xx.1yy compute-1 IN A 1xx.2x.2xx.1yy 1.8.10.6. Creating an InfraEnv resource for hosted control planes on x86 bare metal for IBM Z compute nodes An InfraEnv is an environment where hosts that are booted with PXE images can join as agents. In this case, the agents are created in the same namespace as your hosted control plane. See the following procedure to create an InfraEnv resource: Create a YAML file to contain the configuration. See the following example: apiVersion: agent-install.openshift.io/v1beta1 kind: InfraEnv metadata: name: <hosted-cluster-name> namespace: <hosted-control-plane-namespace> spec: cpuArchitecture: s390x pullSecretRef: name: pull-secret sshAuthorizedKey: <ssh-public-key> Save the file as infraenv-config.yaml . Apply the configuration by entering the following command: To fetch the URL to download the PXE images, such as, initrd.img , kernel.img , or rootfs.img , which allows IBM Z machines to join as agents, enter the following command: 1.8.10.7. Scaling the NodePool object for a hosted cluster on IBM Z The NodePool object is created when you create a hosted cluster. By scaling the NodePool object, you can add more compute nodes to the hosted control plane. Run the following command to scale the NodePool object to two nodes: oc -n <clusters_namespace> scale nodepool <nodepool_name> --replicas 2 The Cluster API agent provider randomly picks two agents that are then assigned to the hosted cluster. Those agents go through different states and finally join the hosted cluster as OpenShift Container Platform nodes. The agents pass through the transition phases in the following order: binding discovering insufficient installing installing-in-progress added-to-existing-cluster Run the following command to see the status of a specific scaled agent: oc -n <hosted_control_plane_namespace> get agent -o jsonpath='{range .items[*]}BMH: {@.metadata.labels.agent-install\.openshift\.io/bmh} Agent: {@.metadata.name} State: {@.status.debugInfo.state}{"\n"}{end}' See the following output: BMH: Agent: 50c23cda-cedc-9bbd-bcf1-9b3a5c75804d State: known-unbound BMH: Agent: 5e498cd3-542c-e54f-0c58-ed43e28b568a State: insufficient Run the following command to see the transition phases: oc -n <hosted_control_plane_namespace> get agent See the following output: NAME CLUSTER APPROVED ROLE STAGE 50c23cda-cedc-9bbd-bcf1-9b3a5c75804d hosted-forwarder true auto-assign 5e498cd3-542c-e54f-0c58-ed43e28b568a true auto-assign da503cf1-a347-44f2-875c-4960ddb04091 hosted-forwarder true auto-assign Run the following command to generate the kubeconfig file to access the hosted cluster: hcp create kubeconfig --namespace <clusters_namespace> --name <hosted_cluster_namespace> > <hosted_cluster_name>.kubeconfig After the agents reach the added-to-existing-cluster state, verify that you can see the OpenShift Container Platform nodes by entering the following command: oc --kubeconfig <hosted_cluster_name>.kubeconfig get nodes See the following output: NAME STATUS ROLES AGE VERSION worker-zvm-0.hostedn.example.com Ready worker 5m41s v1.24.0+3882f8f worker-zvm-1.hostedn.example.com Ready worker 6m3s v1.24.0+3882f8f Cluster Operators start to reconcile by adding workloads to the nodes. Enter the following command to verify that two machines were created when you scaled up the NodePool object: oc -n <hosted_control_plane_namespace> get machine.cluster.x-k8s.io See the following output: NAME CLUSTER NODENAME PROVIDERID PHASE AGE VERSION hosted-forwarder-79558597ff-5tbqp hosted-forwarder-crqq5 worker-zvm-0.hostedn.example.com agent://50c23cda-cedc-9bbd-bcf1-9b3a5c75804d Running 41h 4.15.0 hosted-forwarder-79558597ff-lfjfk hosted-forwarder-crqq5 worker-zvm-1.hostedn.example.com agent://5e498cd3-542c-e54f-0c58-ed43e28b568a Running 41h 4.15.0 Run the following command to check the cluster version: oc --kubeconfig <hosted_cluster_name>.kubeconfig get clusterversion,co See the following output: NAME VERSION AVAILABLE PROGRESSING SINCE STATUS clusterversion.config.openshift.io/version 4.15.0-ec.2 True False 40h Cluster version is 4.15.0-ec.2 Run the following command to check the cluster operator status: oc --kubeconfig <hosted_cluster_name>.kubeconfig get clusteroperators For each component of your cluster, the output shows the following cluster operator statuses: NAME , VERSION , AVAILABLE , PROGRESSING , DEGRADED , SINCE , and MESSAGE . For an output example, see the Initial Operator configuration section in the OpenShift Container Platform documentation. 1.8.10.8. Destroying a hosted cluster on x86 bare metal with IBM Z compute nodes To destroy a hosted cluster on x86 bare metal with IBM Z compute nodes and its managed cluster resource, complete the following steps: Run the following command to scale the NodePool object to zero (0) nodes: oc -n <clusters_namespace> scale nodepool <nodepool_name> --replicas 0 After the node pool is scaled to 0, the compute nodes are detached from the hosted cluster. If the compute nodes are not detached from the hosted cluster or are stuck in the Notready state, delete the compute nodes manually by running the following command: oc --kubeconfig <hosted_clusted_name>.kubeconfig delete node <comopute_node_name> Verify the compute nodes status by entering the following command: oc --kubeconfig <hosted_cluster_name>.kubeconfig get nodes After the compute nodes are detached from the hosted cluster, the status of the agents is changed to auto-assign . Delete the agents from the cluster by running the following command: oc -n <hosted_control_plane_namespace> delete agent <agent_name> Note: You can delete the virtual machines that you created as agents after you delete the agents from the cluster. Run the following command by using the hcp command-line to destroy the hosted control plane: hcp destroy cluster agent --name <hosted_cluster_name> --namespace <clusters_namepsace> 1.8.11. Managing hosted control plane clusters on OpenShift Virtualization With hosted control planes and Red Hat OpenShift Virtualization, you can create OpenShift Container Platform clusters with worker nodes that are hosted by KubeVirt virtual machines. Hosted control planes on OpenShift Virtualization provides several benefits: Enhances resource usage by packing hosted control planes and hosted clusters in the same underlying bare metal infrastructure Separates hosted control planes and hosted clusters to provide strong isolation Reduces cluster provision time by eliminating the bare metal node bootstrapping process Manages many releases under the same base OpenShift Container Platform cluster The hosted control planes feature is enabled by default. You can use the hosted control plane command line interface, hcp , to create an OpenShift Container Platform hosted cluster. The hosted cluster is automatically imported as a managed cluster. If you want to disable this automatic import feature, see Disabling the automatic import of hosted clusters into multicluster engine operator . Important: Run the hub cluster and workers on the same platform for hosted control planes. Each hosted cluster must have a cluster-wide unique name. A hosted cluster name cannot be the same as any existing managed cluster in order for multicluster engine operator to manage it. Do not use clusters as a hosted cluster name. A hosted cluster cannot be created in the namespace of a multicluster engine operator managed cluster. When you configure storage for hosted control planes, consider the recommended etcd practices. To ensure that you meet the latency requirements, dedicate a fast storage device to all hosted control plane etcd instances that run on each control-plane node. You can use LVM storage to configure a local storage class for hosted etcd pods. For more information, see Recommended etcd practices and Persistent storage using logical volume manager storage in the OpenShift Container Platform documentation. 1.8.11.1. Prerequisites You must meet the following prerequisites to create an OpenShift Container Platform cluster on OpenShift Virtualization: You need administrator access to an OpenShift Container Platform cluster, version 4.14 or later, specified by the KUBECONFIG environment variable. The OpenShift Container Platform hosting cluster must have wildcard DNS routes enabled, as shown in the following DNS: The OpenShift Container Platform hosting cluster must have OpenShift Virtualization, version 4.14 or later, installed on it. For more information, see Installing OpenShift Virtualization using the web console . The OpenShift Container Platform hosting cluster must be configured with OVNKubernetes as the default pod network CNI. The OpenShift Container Platform hosting cluster must have a default storage class. For more information, see Postinstallation storage configuration . The following example shows how to set a default storage class: You need a valid pull secret file for the quay.io/openshift-release-dev repository. For more information, see Install OpenShift on any x86_64 platform with user-provisioned infrastructure . You need to install the hosted control plane command line interface . Before you can provision your cluster, you need to configure a load balancer. For more information, see Optional: Configuring MetalLB . For optimal network performance, use a network maximum transmission unit (MTU) of 9000 or greater on the OpenShift Container Platform cluster that hosts the KubeVirt virtual machines. If you use a lower MTU setting, network latency and the throughput of the hosted pods are affected. Enable multiqueue on node pools only when the MTU is 9000 or greater. The multicluster engine operator must have at least one managed OpenShift Container Platform cluster. The local-cluster is automatically imported. See Advanced configuration for more information about the local-cluster . You can check the status of your hub cluster by running the following command: 1.8.11.2. Firewall and port requirements Ensure that you meet the firewall and port requirements so that ports can communicate between the management cluster, the control plane, and hosted clusters: The kube-apiserver service runs on port 6443 by default and requires ingress access for communication between the control plane components. If you use the NodePort publishing strategy, ensure that the node port that is assigned to the kube-apiserver service is exposed. If you use MetalLB load balancing, allow ingress access to the IP range that is used for load balancer IP addresses. If you use the NodePort publishing strategy, use a firewall rule for the ignition-server and Oauth-server settings. The konnectivity agent, which establishes a reverse tunnel to allow bi-directional communication on the hosted cluster, requires egress access to the cluster API server address on port 6443. With that egress access, the agent can reach the kube-apiserver service. If the cluster API server address is an internal IP address, allow access from the workload subnets to the IP address on port 6443. If the address is an external IP address, allow egress on port 6443 to that external IP address from the nodes. If you change the default port of 6443, adjust the rules to reflect that change. Ensure that you open any ports that are required by the workloads that run in the clusters. Use firewall rules, security groups, or other access controls to restrict access to only required sources. Avoid exposing ports publicly unless necessary. For production deployments, use a load balancer to simplify access through a single IP address. For additional resources about hosted control planes on Red Hat OpenShift Virtualization, see the following documentation: To learn about etcd and LVM storage recommendations, see Recommended etcd practices and Persistent storage using logical volume manager storage . To configure hosted control planes on Red Hat OpenShift Virtualization in a disconnected environment, see Configuring hosted control planes in a disconnected environment . To disable the hosted control planes feature or, if you already disabled it and want to manually enable it, see Enabling or disabling the hosted control planes feature . To manage hosted clusters by running Red Hat Ansible Automation Platform jobs, see Configuring Ansible Automation Platform jobs to run on hosted clusters . 1.8.11.3. Creating a hosted cluster with the KubeVirt platform With OpenShift Container Platform 4.14 and later, you can create a cluster with KubeVirt, to include creating with an external infrastructure. Learn more about the process to create with KubeVirt: Creating a hosted cluster Creating a hosted cluster by using external infrastructure 1.8.11.3.1. Creating a hosted cluster To create a hosted cluster, use the hosted control plane command line interface, hcp : 1 Specify the name of your hosted cluster, for instance, example . 2 Specify the worker count, for example, 2 . 3 Specify the path to your pull secret, for example, /user/name/pullsecret . 4 Specify a value for memory, for example, 6Gi . 5 Specify a value for CPU, for example, 2 . 6 Specify the etcd storage class name, for example, lvm-storageclass . Note: You can use the --release-image flag to set up the hosted cluster with a specific OpenShift Container Platform release. A default node pool is created for the cluster with two virtual machine worker replicas according to the --node-pool-replicas flag. After a few moments, verify that the hosted control plane pods are running by entering the following command: See the following example output: A hosted cluster that has worker nodes that are backed by KubeVirt virtual machines typically takes 10-15 minutes to be fully provisioned. To check the status of the hosted cluster, see the corresponding HostedCluster resource by entering the following command: See the following example output, which illustrates a fully provisioned HostedCluster object: Replace 4.x.0 with the supported OpenShift Container Platform version that you want to use. Access the hosted cluster by following the instructions in Accessing the hosted cluster . 1.8.11.3.2. Creating a hosted cluster by using external infrastructure By default, the HyperShift Operator hosts both the control plane pods of the hosted cluster and the KubeVirt worker VMs within the same cluster. With the external infrastructure feature, you can place the worker node VMs on a separate cluster from the control plane pods. The management cluster is the OpenShift Container Platform cluster that runs the HyperShift Operator and hosts the control plane pods for a hosted cluster. The infrastructure cluster is the OpenShift Container Platform cluster that runs the KubeVirt worker VMs for a hosted cluster. By default, the management cluster also acts as the infrastructure cluster that hosts VMs. However, for external infrastructure, the management and infrastructure clusters are different. 1.8.11.3.2.1. Prerequisites for external infrastructure You must have a namespace on the external infrastructure cluster for the KubeVirt nodes to be hosted in. You must have a kubeconfig file for the external infrastructure cluster. 1.8.11.3.2.2. Creating a hosted cluster by using the hcp command line interface You can create a hosted cluster by using the hcp command line interface. To place the KubeVirt worker VMs on the infrastructure cluster, use the --infra-kubeconfig-file and --infra-namespace arguments, as shown in the following example: 1 Specify the name of your hosted cluster, for instance, example . 2 Specify the worker count, for example, 2 . 3 Specify the path to your pull secret, for example, /user/name/pullsecret . 4 Specify a value for memory, for example, 6Gi . 5 Specify a value for CPU, for example, 2 . 6 Specify the infrastructure namespace, for example, clusters-example . 7 Specify the path to your kubeconfig file for the infrastructure cluster, for example, /user/name/external-infra-kubeconfig . After you enter that command, the control plane pods are hosted on the management cluster that the HyperShift Operator runs on, and the KubeVirt VMs are hosted on a separate infrastructure cluster. Access the hosted cluster by following the instructions in Accessing the hosted cluster . 1.8.11.3.3. Creating a hosted cluster by using the console To create a hosted cluster with the KubeVirt platform by using the console, complete the following steps: Open the OpenShift Container Platform web console and log in by entering your administrator credentials. For instructions to open the console, see Accessing the web console in the OpenShift Container Platform documentation. In the console header, ensure that All Clusters is selected. Click Infrastructure > Clusters . Click Create cluster > Red Hat OpenShift Virtualization > Hosted . On the Create cluster page, follow the prompts to enter details about the cluster and node pools. Notes: If you want to use predefined values to automatically populate fields in the console, you can create a Red Hat OpenShift Virtualization credential. For more information, see Creating a credential for an on-premises environment . On the Cluster details page, the pull secret is your OpenShift Container Platform pull secret that you use to access OpenShift Container Platform resources. If you selected a Red Hat OpenShift Virtualization credential, the pull secret is automatically populated. Review your entries and click Create . The Hosted cluster view is displayed. Monitor the deployment of the hosted cluster in the Hosted cluster view. If you do not see information about the hosted cluster, ensure that All Clusters is selected, and click the cluster name. Wait until the control plane components are ready. This process can take a few minutes. To view the node pool status, scroll to the NodePool section. The process to install the nodes takes about 10 minutes. You can also click Nodes to confirm whether the nodes joined the hosted cluster. 1.8.11.3.4. Additional resources To create credentials that you can reuse when you create a hosted cluster with the console, see Creating a credential for an on-premises environment . To access a hosted cluster, see Accessing the hosted cluster . 1.8.11.4. Default ingress and DNS behavior Every OpenShift Container Platform cluster includes a default application Ingress Controller, which must have an wildcard DNS record associated with it. By default, hosted clusters that are created by using the HyperShift KubeVirt provider automatically become a subdomain of the OpenShift Container Platform cluster that the KubeVirt virtual machines run on. For example, your OpenShift Container Platform cluster might have the following default ingress DNS entry: *.apps.mgmt-cluster.example.com As a result, a KubeVirt hosted cluster that is named guest and that runs on that underlying OpenShift Container Platform cluster has the following default ingress: *.apps.guest.apps.mgmt-cluster.example.com For the default ingress DNS to work properly, the cluster that hosts the KubeVirt virtual machines must allow wildcard DNS routes. You can configure this behavior by entering the following command: Note: When you use the default hosted cluster ingress, connectivity is limited to HTTPS traffic over port 443. Plain HTTP traffic over port 80 is rejected. This limitation applies to only the default ingress behavior. 1.8.11.4.1. Customizing ingress and DNS behavior If you do not want to use the default ingress and DNS behavior, you can configure a KubeVirt hosted cluster with a unique base domain at creation time. This option requires manual configuration steps during creation and involves three main steps: cluster creation, load balancer creation, and wildcard DNS configuration. 1.8.11.4.1.1. Deploying a hosted cluster that specifies the base domain To create a hosted cluster that specifies the base domain, enter the following command: 1 Specify the name of your hosted cluster, for instance, example . 2 Specify the worker count, for example, 2 . 3 Specify the path to your pull secret, for example, /user/name/pullsecret . 4 Specify a value for memory, for example, 6Gi . 5 Specify a value for CPU, for example, 2 . 6 Specify the base domain, for example, hypershift.lab . As a result, the hosted cluster has an ingress wildcard that is configured for the cluster name and the base domain, for example, .apps.example.hypershift.lab . The hosted cluster remains in Partial status. Because, after creating a hosted cluster with unique base domain, you must configure the required DNS records and load balancer. View the status of your hosted cluster by entering the following command: See the following example output: Access the cluster by entering the following commands: See the following example output: Replace 4.x.0 with the supported OpenShift Container Platform version that you want to use. The steps fixes the errors in the output. Note: If your hosted cluster is on bare metal, you might need MetalLB to set up load balancer services. For more information, see Optional: Configuring MetalLB . 1.8.11.4.1.2. Setting up the load balancer Set up the load balancer service that routes ingress traffic to the KubeVirt VMs and assigns a wildcard DNS entry to the load balancer IP address. A NodePort service that exposes the hosted cluster ingress already exists. You can export the node ports and create the load balancer service that targets those ports. Get the HTTP node port by entering the following command: Note the HTTP node port value to use in the step. Get the HTTPS node port by entering the following command: Note the HTTPS node port value to use in the step. Create the load balancer service by entering the following command: 1 Specify the HTTPS node port value that you noted in the step. 2 Specify the HTTP node port value that you noted in the step. 1.8.11.4.1.3. Setting up a wildcard DNS Set up up a wildcard DNS record or CNAME that references the external IP of the load balancer service. Get the external IP address by entering the following command: See the following example output: Configure a wildcard DNS entry that references the external IP address. View the following example DNS entry: *.apps.<hosted-cluster-name\>.<base-domain\>. The DNS entry must be able to route inside and outside of the cluster. See the following DNS resolutions example: Check that hosted cluster status has moved from Partial to Completed by entering the following command: See the following example output: Replace 4.x.0 with the supported OpenShift Container Platform version that you want to use. 1.8.11.4.1.4. Additional resources Managing hosted control plane clusters on OpenShift Virtualization Optional: Configuring MetalLB Return to the beginning of this topic, Default ingress and DNS behavior . 1.8.11.5. Optional: Configuring MetalLB You must install the MetalLB Operator before configuring MetalLB. For more information, see Installing the MetalLB Operator in the OpenShift Container Platform documentation. Perform the following steps to configure MetalLB on your guest cluster: Create a MetalLB resource by saving the following sample YAML content in the configure-metallb.yaml file: apiVersion: metallb.io/v1beta1 kind: MetalLB metadata: name: metallb namespace: metallb-system Apply the YAML content by entering the following command: oc apply -f configure-metallb.yaml See the following example output: Create a IPAddressPool resource by saving the following sample YAML content in the create-ip-address-pool.yaml file: apiVersion: metallb.io/v1beta1 kind: IPAddressPool metadata: name: metallb namespace: metallb-system spec: addresses: - 192.168.216.32-192.168.216.122 1 1 Create an address pool with an available range of IP addresses within the node network. Replace the IP address range with an unused pool of available IP addresses in your network. Apply the YAML content by entering the following command: oc apply -f create-ip-address-pool.yaml See the following example output: Create a L2Advertisement resource by saving the following sample YAML content in the l2advertisement.yaml file: apiVersion: metallb.io/v1beta1 kind: L2Advertisement metadata: name: l2advertisement namespace: metallb-system spec: ipAddressPools: - metallb Apply the YAML content by entering the following command: oc apply -f l2advertisement.yaml See the following example output: 1.8.11.5.1. Additional resources For more information about MetalLB, see Installing the MetalLB Operator . 1.8.11.6. Configuring additional networks, guaranteed CPUs, and VM scheduling for node pools If you need to configure additional networks for node pools, request a guaranteed CPU access for Virtual Machines (VMs), or manage scheduling of KubeVirt VMs, see the following procedures. 1.8.11.6.1. Adding multiple networks to a node pool By default, nodes generated by a node pool are attached to the pod network. You can attach additional networks to the nodes by using Multus and NetworkAttachmentDefinitions. To add multiple networks to nodes, use the --additional-network argument by running the following command: hcp create cluster kubevirt \ --name <hosted_cluster_name> \ 1 --node-pool-replicas <worker_node_count> \ 2 --pull-secret <path_to_pull_secret> \ 3 --memory <memory> \ 4 --cores <cpu> \ 5 --additional-network name:<namespace/name> \ 6 --additional-network name:<namespace/name> 1 Specify the name of your hosted cluster, for instance, example . 2 Specify your worker node count, for example, 2 . 3 Specify the path to your pull secret, for example, /user/name/pullsecret . 4 Specify the memory value, for example, 8Gi . 5 Specify the CPU value, for example, 2 . 6 Set the value of the -additional-network argument to name:<namespace/name> . Replace <namespace/name> with a namespace and name of your NetworkAttachmentDefinitions. 1.8.11.6.1.1. Using an additional network as default You can add your additional network as a default network for the nodes by disabling the default pod network. To add an additional network as default to your nodes, run the following command: hcp create cluster kubevirt \ --name <hosted_cluster_name> \ 1 --node-pool-replicas <worker_node_count> \ 2 --pull-secret <path_to_pull_secret> \ 3 --memory <memory> \ 4 --cores <cpu> \ 5 --attach-default-network false \ 6 --additional-network name:<namespace>/<network_name> 7 1 Specify the name of your hosted cluster, for instance, example . 2 Specify your worker node count, for example, 2 . 3 Specify the path to your pull secret, for example, /user/name/pullsecret . 4 Specify the memory value, for example, 8Gi . 5 Specify the CPU value, for example, 2 . 6 The --attach-default-network false argument disables the default pod network. 7 Specify the additional network that you want to add to your nodes, for example, name:my-namespace/my-network . 1.8.11.6.2. Requesting guaranteed CPU resources By default, KubeVirt VMs might share its CPUs with other workloads on a node. This might impact performance of a VM. To avoid the performance impact, you can request a guaranteed CPU access for VMs. To request guaranteed CPU resources, set the --qos-class argument to Guaranteed by running the following command: hcp create cluster kubevirt \ --name <hosted_cluster_name> \ 1 --node-pool-replicas <worker_node_count> \ 2 --pull-secret <path_to_pull_secret> \ 3 --memory <memory> \ 4 --cores <cpu> \ 5 --qos-class Guaranteed 6 1 Specify the name of your hosted cluster, for instance, example . 2 Specify your worker node count, for example, 2 . 3 Specify the path to your pull secret, for example, /user/name/pullsecret . 4 Specify the memory value, for example, 8Gi . 5 Specify the CPU value, for example, 2 . 6 The --qos-class Guaranteed argument guarantees that the specified number of CPU resources are assigned to VMs. 1.8.11.6.3. Scheduling KubeVirt VMs on a set of nodes By default, KubeVirt VMs created by a node pool are scheduled to any available nodes. You can schedule KubeVirt VMs on a specific set of nodes that has enough capacity to run the VM. To schedule KubeVirt VMs within a node pool on a specific set of nodes, use the --vm-node-selector argument by running the following command: hcp create cluster kubevirt \ --name <hosted_cluster_name> \ 1 --node-pool-replicas <worker_node_count> \ 2 --pull-secret <path_to_pull_secret> \ 3 --memory <memory> \ 4 --cores <cpu> \ 5 --vm-node-selector <label_key>=<label_value>,<label_key>=<label_value> 6 1 Specify the name of your hosted cluster, for instance, example . 2 Specify your worker node count, for example, 2 . 3 Specify the path to your pull secret, for example, /user/name/pullsecret . 4 Specify the memory value, for example, 8Gi . 5 Specify the CPU value, for example, 2 . 6 The --vm-node-selector flag defines a specific set of nodes that contains the key-value pairs. Replace <label_key> and <label_value> with the key and value of your labels respectively. 1.8.11.7. Scaling a node pool You can manually scale a node pool by using the oc scale command: After a few moments, enter the following command to see the status of the node pool: See the following example output: 1.8.11.7.1. Adding node pools You can create node pools for a hosted cluster by specifying a name, number of replicas, and any additional information, such as memory and CPU requirements. To create a node pool, enter the following information. In this example, the node pool has more CPUs assigned to the VMs: Check the status of the node pool by listing nodepool resources in the clusters namespace: See the following example output: Replace 4.x.0 with the supported OpenShift Container Platform version that you want to use. After some time, you can check the status of the node pool by entering the following command: See the following example output: Verify that the node pool is in the status that you expect by entering this command: See the following example output: Replace 4.x.0 with the supported OpenShift Container Platform version that you want to use. 1.8.11.7.1.1. Additional resources Managing hosted control plane clusters on OpenShift Virtualization To scale down the data plane to zero, see Scaling down the data plane to zero . 1.8.11.8. Verifying hosted cluster creation on OpenShift Virtualization To verify that your hosted cluster was successfully created, complete the following steps. Verify that the HostedCluster resource transitioned to the completed state by entering the following command: See the following example output: Verify that all the cluster operators in the hosted cluster are online by entering the following commands: See the following example output: 1.8.11.9. Configuring storage for hosted control planes on OpenShift Virtualization If you do not provide any advanced storage configuration, the default storage class is used for the KubeVirt virtual machine (VM) images, the KubeVirt Container Storage Interface (CSI) mapping, and the etcd volumes. The following table lists the capabilities that the infrastructure must provide to support persistent storage in a hosted cluster: Table 1.13. Persistent storage modes in a hosted cluster Infrastructure CSI provider Hosted cluster CSI provider Hosted cluster capabilities Notes Any RWX Block CSI provider kubevirt-csi Basic: RWO Block and File , RWX Block and Snapshot Recommended Any RWX Block CSI provider Red Hat OpenShift Data Foundation external mode Red Hat OpenShift Data Foundation feature set Any RWX Block CSI provider Red Hat OpenShift Data Foundation internal mode Red Hat OpenShift Data Foundation feature set Do not use 1.8.11.9.1. Mapping KubeVirt CSI storage classes KubeVirt CSI supports mapping a infrastructure storage class that is capable of ReadWriteMany (RWX) access. You can map the infrastructure storage class to hosted storage class during cluster creation. To map the infrastructure storage class to the hosted storage class, use the --infra-storage-class-mapping argument by running the following command: hcp create cluster kubevirt \ --name <hosted_cluster_name> \ 1 --node-pool-replicas <worker_node_count> \ 2 --pull-secret <path_to_pull_secret> \ 3 --memory <memory> \ 4 --cores <cpu> \ 5 --infra-storage-class-mapping=<infrastructure_storage_class>/<hosted_storage_class> \ 6 1 Specify the name of your hosted cluster, for instance, example . 2 Specify the worker count, for example, 2 . 3 Specify the path to your pull secret, for example, /user/name/pullsecret . 4 Specify a value for memory, for example, 8Gi . 5 Specify a value for CPU, for example, 2 . 6 Replace <infrastructure_storage_class> with the infrastructure storage class name and <hosted_storage_class> with the hosted cluster storage class name. You can use the --infra-storage-class-mapping argument multiple times within the hcp create cluster command. After you create the hosted cluster, the infrastructure storage class is visible within the hosted cluster. When you create a Persistent Volume Claim (PVC) within the hosted cluster that uses one of those storage classes, KubeVirt CSI provisions that volume by using the infrastructure storage class mapping that you configured during cluster creation. Note: KubeVirt CSI supports mapping only an infrastructure storage class that is capable of RWX access. The following table shows how volume and access mode capabilities map to KubeVirt CSI storage classes: Table 1.14. Mapping KubeVirt CSI storage classes to access and volume modes Infrastructure CSI capability Hosted cluster CSI capability VM live migration support Notes RWX: Block or Filesystem ReadWriteOnce (RWO) Block or Filesystem RWX Block only Supported Use Block mode because Filesystem volume mode results in degraded hosted Block mode performance. RWX Block volume mode is supported only when the hosted cluster is OpenShift Container Platform 4.16 or later. RWO Block storage RWO Block storage or Filesystem Not supported Lack of live migration support affects the ability to update the underlying infrastructure cluster that hosts the KubeVirt VMs. RWO FileSystem RWO Block or Filesystem Not supported Lack of live migration support affects the ability to update the underlying infrastructure cluster that hosts the KubeVirt VMs. Use of the infrastructure Filesystem volume mode results in degraded hosted Block mode performance. 1.8.11.9.2. Mapping a single KubeVirt CSI volume snapshot class You can expose your infrastructure volume snapshot class to the hosted cluster by using KubeVirt CSI. To map your volume snapshot class to the hosted cluster, use the --infra-volumesnapshot-class-mapping argument when creating a hosted cluster. Run the following command: hcp create cluster kubevirt \ --name <hosted_cluster_name> \ 1 --node-pool-replicas <worker_node_count> \ 2 --pull-secret <path_to_pull_secret> \ 3 --memory <memory> \ 4 --cores <cpu> \ 5 --infra-storage-class-mapping=<infrastructure_storage_class>/<hosted_storage_class> \ 6 --infra-volumesnapshot-class-mapping=<infrastructure_volume_snapshot_class>/<hosted_volume_snapshot_class> 7 1 Specify the name of your hosted cluster, for instance, example . 2 Specify the worker count, for example, 2 . 3 Specify the path to your pull secret, for example, /user/name/pullsecret . 4 Specify a value for memory, for example, 8Gi . 5 Specify a value for CPU, for example, 2 . 6 Replace <infrastructure_storage_class> with the storage class present in the infrastructure cluster. Replace <hosted_storage_class> with the storage class present in the hosted cluster. 7 Replace <infrastructure_volume_snapshot_class> with the volume snapshot class present in the infrastructure cluster. Replace <hosted_volume_snapshot_class> with the volume snapshot class present in the hosted cluster. Note: If you do not use the --infra-storage-class-mapping and --infra-volumesnapshot-class-mapping arguments, a hosted cluster is created with the default storage class and the volume snapshot class. Therefore, you must set the default storage class and the volume snapshot class in the infrastructure cluster. 1.8.11.9.3. Mapping multiple KubeVirt CSI volume snapshot classes You can map multiple volume snapshot classes to the hosted cluster by assigning them to a specific group. The infrastructure storage class and the volume snapshot class are compatible with each other only if they belong to a same group. To map multiple volume snapshot classes to the hosted cluster, use the group option when creating a hosted cluster. Run the following command: hcp create cluster kubevirt \ --name <hosted_cluster_name> \ 1 --node-pool-replicas <worker_node_count> \ 2 --pull-secret <path_to_pull_secret> \ 3 --memory <memory> \ 4 --cores <cpu> \ 5 --infra-storage-class-mapping=<infrastructure_storage_class>/<hosted_storage_class>,group=<group_name> \ 6 --infra-storage-class-mapping=<infrastructure_storage_class>/<hosted_storage_class>,group=<group_name> \ --infra-storage-class-mapping=<infrastructure_storage_class>/<hosted_storage_class>,group=<group_name> \ --infra-volumesnapshot-class-mapping=<infrastructure_volume_snapshot_class>/<hosted_volume_snapshot_class>,group=<group_name> \ 7 --infra-volumesnapshot-class-mapping=<infrastructure_volume_snapshot_class>/<hosted_volume_snapshot_class>,group=<group_name> 1 Specify the name of your hosted cluster, for instance, example . 2 Specify the worker count, for example, 2 . 3 Specify the path to your pull secret, for example, /user/name/pullsecret . 4 Specify a value for memory, for example, 8Gi . 5 Specify a value for CPU, for example, 2 . 6 Replace <infrastructure_storage_class> with the storage class present in the infrastructure cluster. Replace <hosted_storage_class> with the storage class present in the hosted cluster. Replace <group_name> with the group name. For example, infra-storage-class-mygroup/hosted-storage-class-mygroup,group=mygroup and infra-storage-class-mymap/hosted-storage-class-mymap,group=mymap . 7 Replace <infrastructure_volume_snapshot_class> with the volume snapshot class present in the infrastructure cluster. Replace <hosted_volume_snapshot_class> with the volume snapshot class present in the hosted cluster. For example, infra-vol-snap-mygroup/hosted-vol-snap-mygroup,group=mygroup and infra-vol-snap-mymap/hosted-vol-snap-mymap,group=mymap . 1.8.11.9.4. Configuring KubeVirt VM root volume At cluster creation time, you can configure the storage class that is used to host the KubeVirt VM root volumes by using the --root-volume-storage-class argument. To set a custom storage class and volume size for KubeVirt VMs, run the following command: hcp create cluster kubevirt \ --name <hosted_cluster_name> \ 1 --node-pool-replicas <worker_node_count> \ 2 --pull-secret <path_to_pull_secret> \ 3 --memory <memory> \ 4 --cores <cpu> \ 5 --root-volume-storage-class <root_volume_storage_class> \ 6 --root-volume-size <volume_size> 7 1 Specify the name of your hosted cluster, for instance, example . 2 Specify the worker count, for example, 2 . 3 Specify the path to your pull secret, for example, /user/name/pullsecret . 4 Specify a value for memory, for example, 8Gi . 5 Specify a value for CPU, for example, 2 . 6 Specify a name of the storage class to host the KubeVirt VM root volumes, for example, ocs-storagecluster-ceph-rbd . 7 Specify the volume size, for example, 64 . As a result, you get a hosted cluster created with VMs hosted on PVCs. 1.8.11.9.5. Enabling KubeVirt VM image caching You can use KubeVirt VM image caching to optimize both cluster startup time and storage utilization. KubeVirt VM image caching supports the use of a storage class that is capable of smart cloning and the ReadWriteMany access mode. For more information about smart cloning, see Cloning a data volume using smart-cloning . Image caching works as follows: The VM image is imported to a PVC that is associated with the hosted cluster. A unique clone of that PVC is created for every KubeVirt VM that is added as a worker node to the cluster. Image caching reduces VM startup time by requiring only a single image import. It can further reduce overall cluster storage usage when the storage class supports copy-on-write cloning. To enable image caching, during cluster creation, use the --root-volume-cache-strategy=PVC argument by running the following command: hcp create cluster kubevirt \ --name <hosted_cluster_name> \ 1 --node-pool-replicas <worker_node_count> \ 2 --pull-secret <path_to_pull_secret> \ 3 --memory <memory> \ 4 --cores <cpu> \ 5 --root-volume-cache-strategy=PVC 6 1 Specify the name of your hosted cluster, for instance, example . 2 Specify the worker count, for example, 2 . 3 Specify the path to your pull secret, for example, /user/name/pullsecret . 4 Specify a value for memory, for example, 8Gi . 5 Specify a value for CPU, for example, 2 . 6 Specify a strategy for image caching, for example, PVC . 1.8.11.9.6. Configuring etcd storage At cluster creation time, you can configure the storage class that is used to host etcd data by using the --etcd-storage-class argument. To configure a storage class for etcd, run the following command: hcp create cluster kubevirt \ --name <hosted_cluster_name> \ 1 --node-pool-replicas <worker_node_count> \ 2 --pull-secret <path_to_pull_secret> \ 3 --memory <memory> \ 4 --cores <cpu> \ 5 --etcd-storage-class=<etcd_storage_class_name> 6 1 Specify the name of your hosted cluster, for instance, example . 2 Specify the worker count, for example, 2 . 3 Specify the path to your pull secret, for example, /user/name/pullsecret . 4 Specify a value for memory, for example, 8Gi . 5 Specify a value for CPU, for example, 2 . 6 Specify the etcd storage class name, for example, lvm-storageclass . If you do not provide an --etcd-storage-class argument, the default storage class is used. 1.8.11.9.6.1. Additional resources Cloning a data volume using smart-cloning 1.8.11.10. Destroying a hosted cluster on OpenShift Virtualization To destroy a hosted cluster and its managed cluster resource, complete the following steps: Delete the managed cluster resource on multicluster engine operator by running the following command: where cluster_name is the name of your cluster. Delete the hosted cluster and its back-end resources by running the following command: Replace names where necessary. 1.8.12. Configuring hosted control planes in a disconnected environment In the context of hosted control planes, a disconnected environment is an OpenShift Container Platform deployment that is not connected to the internet and that uses hosted control planes as a base. Technology Preview: You can deploy hosted control planes in a disconnected environment on bare-metal platforms by using an IPv4 or IPv6 network. In addition, hosted control planes in a disconnected environment is available on a dual-stack network as a Technology Preview feature. If you use the Red Hat OpenShift Virtualization platform, hosted control planes in a disconnected environment is available as a Technology Preview feature only. 1.8.12.1. Disconnected environment architecture When you provision hosted control planes on bare metal, you use the Agent platform. The Agent platform and multicluster engine operator work together to enable disconnected deployments. The Agent platform uses the central infrastructure management service to add worker nodes to a hosted cluster. For an introduction to the central infrastructure management service, see Enabling the central infrastructure management service . The following diagram illustrates an example architecture of a disconnected environment: Configure infrastructure services, including the registry certificate deployment with TLS support, web server, and DNS, to ensure that the disconnected deployment works. Create a config map in the openshift-config namespace. In this example, the config map is named registry-config . The content of the config map is the Registry CA certificate. The data field of the config map must contain the following key/value: Key: <registry_dns_domain_name>..<port> , for example, registry.hypershiftdomain.lab..5000: . Ensure that you place .. after the registry DNS domain name when you specify a port. Value: The certificate content For more information about creating a config map, see Configuring TLS certificates for an IPv4 network . Modify the images.config.openshift.io custom resource (CR) specification and adds a new field named additionalTrustedCA with a value of name: registry-config . Create a config map that contains two data fields. One field contains the registries.conf file in RAW format, and the other field contains the Registry CA and is named ca-bundle.crt . The config map belongs to the multicluster-engine namespace, and the config map name is referenced in other objects. For an example of a config map, see the following sample configuration: In the multicluster engine operator namespace, you create the multiclusterengine CR, which enables both the Agent and hypershift-addon add-ons. The multicluster engine operator namespace must contain the config maps to modify behavior in a disconnected deployment. The namespace also contains the multicluster-engine , assisted-service , and hypershift-addon-manager pods. Create the following objects that are necessary to deploy the hosted cluster: Secrets: Secrets contain the pull secret, SSH key, and etcd encryption key. Config map: The config map contains the CA certificate of the private registry. HostedCluster : The HostedCluster resource defines the configuration of the cluster that the user intends to create. NodePool : The NodePool resource identifies the node pool that references the machines to use for the data plane. After you create the hosted cluster objects, the HyperShift Operator establishes the HostedControlPlane namespace to accommodate control plane pods. The namespace also hosts components such as Agents, bare metal hosts (BMHs), and the InfraEnv resource. Later, you create the InfraEnv resource, and after ISO creation, you create the BMHs and their secrets that contain baseboard management controller (BMC) credentials. The Metal3 Operator in the openshift-machine-api namespace inspects the new BMHs. Then, the Metal3 Operator tries to connect to the BMCs to start them by using the configured LiveISO and RootFS values that are specified through the AgentServiceConfig CR in the multicluster engine operator namespace. After the worker nodes of the HostedCluster resource are started, an Agent container is started. This agent establishes contact with the Assisted Service, which orchestrates the actions to complete the deployment. Initially, you need to scale the NodePool resource to the number of worker nodes for the HostedCluster resource. The Assisted Service manages the remaining tasks. At this point, you wait for the deployment process to be completed. 1.8.12.2. Prerequisites To configure hosted control planes in a disconnected environment, you must meet the following prerequisites: CPU: The number of CPUs provided determines how many hosted clusters can run concurrently. In general, use 16 CPUs for each node for 3 nodes. For minimal development, you can use 12 CPUs for each node for 3 nodes. Memory: The amount of RAM affects how many hosted clusters can be hosted. Use 48 GB of RAM for each node. For minimal development, 18 GB of RAM might be sufficient. Storage: Use SSD storage for multicluster engine operator. Management cluster: 250 GB. Registry: The storage needed depends on the number of releases, operators, and images that are hosted. An acceptable number might be 500 GB, preferably separated from the disk that hosts the hosted cluster. Web server: The storage needed depends on the number of ISOs and images that are hosted. An acceptable number might be 500 GB. Production: For a production environment, separate the management cluster, the registry, and the web server on different disks. This example illustrates a possible configuration for production: Registry: 2 TB Management cluster: 500 GB Web server: 2 TB 1.8.12.3. Extracting the OpenShift Container Platform release image digest You can extract the OpenShift Container Platform release image digest by using the tagged image. Complete the following steps: Obtain the image digest by running the following command: oc adm release info <tagged_openshift_release_image> | grep "Pull From" Replace <tagged_openshift_release_image> with the tagged image for the supported OpenShift Container Platform version, for example, quay.io/openshift-release-dev/ocp-release:4.14.0-x8_64 . See the following example output: To know more about the image tag and digest, see Referencing images in imagestreams in the OpenShift Container Platform documentation. 1.8.12.3.1. Additional resources Configuring TLS certificates for an IPv4 network Referencing images in imagestreams 1.8.12.4. Monitoring user workload in a disconnected environment The hypershift-addon managed cluster add-on enables the --enable-uwm-telemetry-remote-write option in the HyperShift Operator. By enabling that option, you ensure that user workload monitoring is enabled and that it can remotely write telemetry metrics from control planes. If you installed multicluster engine operator on OpenShift Container Platform clusters that are not connected to the internet, when you try to run the user workload monitoring feature of the HyperShift Operator by entering the following command, the feature fails with an error: The error might look like this example: To avoid that error, you must disable the user workload monitoring option by creating a config map in the local-cluster namespace. You can create the config map either before or after you enable the add-on. The add-on agent reconfigures the HyperShift Operator. Create the following config map: kind: ConfigMap apiVersion: v1 metadata: name: hypershift-operator-install-flags namespace: local-cluster data: installFlagsToAdd: "" installFlagsToRemove: "--enable-uwm-telemetry-remote-write" 1.8.12.4.1. Verifying the status of the hosted control plane feature The hosted control plane feature is enabled by default. If the feature is disabled and you want to enable it, enter the following command. Replace multiclusterengine with the name of your multicluster engine operator instance: When you enable the feature, the hypershift-addon managed cluster add-on is installed in the local-cluster managed cluster, and the add-on agent installs the HyperShift Operator on the multicluster engine operator hub cluster. Confirm that the hypershift-addon managed cluster add-on is installed by entering the following command: See the resulting output: To avoid a timeout during this process, enter the following commands: When the process is complete, the hypershift-addon managed cluster add-on and the HyperShift Operator are installed, and the local-cluster managed cluster is available to host and manage hosted clusters. 1.8.12.4.2. Configuring the hypershift-addon managed cluster add-on to run on an infrastructure node By default, no node placement preference is specified for the hypershift-addon managed cluster add-on. Consider running the add-ons on the infrastructure nodes, because by doing so, you can prevent incurring billing costs against subscription counts and separate maintenance and management tasks. Log in to the hub cluster. Open the hypershift-addon-deploy-config add-on deployment configuration specification for editing by entering the following command: Add the nodePlacement field to the specification, as shown in the following example: apiVersion: addon.open-cluster-management.io/v1alpha1 kind: AddOnDeploymentConfig metadata: name: hypershift-addon-deploy-config namespace: multicluster-engine spec: nodePlacement: nodeSelector: node-role.kubernetes.io/infra: "" tolerations: - effect: NoSchedule key: node-role.kubernetes.io/infra operator: Exists Save the changes. The hypershift-addon managed cluster add-on is deployed on an infrastructure node for new and existing managed clusters. 1.8.12.5. Configuring hosted control planes on an IPv4 network IPv4 is one of the simplest network configurations to deploy hosted control planes in a disconnected environment. IPv4 ranges require fewer external components than IPv6 or dual-stack setups. The process to configure hosted control planes on an IPv4 network involves the following steps, which are described in detail in the sections that follow: Configure the hypervisor for an IPv4 network Configuring DNS for an IPv4 network Deploying a registry for an IPv4 network Setting up a management cluster for an IPv4 network Configuring the web server for an IPv4 network Configuring image mirroring for an IPv4 network Deploying multicluster engine operator for an IPv4 network Configuring TLS certificates for an IPv4 network Deploying the hosted cluster for an IPv4 network Finishing the deployment for an IPv4 network 1.8.12.5.1. Configuring the hypervisor for an IPv4 network The following information applies to virtual machine environments only. 1.8.12.5.1.1. Accessing and deploying packages for a virtual OpenShift Container Platform cluster To deploy a virtual OpenShift Container Platform management cluster, access the required packages by entering the following command: sudo dnf install dnsmasq radvd vim golang podman bind-utils net-tools httpd-tools tree htop strace tmux -y Enable and start the Podman service by entering the following command: systemctl enable --now podman To use kcli to deploy the OpenShift Container Platform management cluster and other virtual components, install and configure the hypervisor by entering the following commands: sudo yum -y install libvirt libvirt-daemon-driver-qemu qemu-kvm sudo usermod -aG qemu,libvirt USD(id -un) sudo newgrp libvirt sudo systemctl enable --now libvirtd sudo dnf -y copr enable karmab/kcli sudo dnf -y install kcli sudo kcli create pool -p /var/lib/libvirt/images default kcli create host kvm -H 127.0.0.1 local sudo setfacl -m u:USD(id -un):rwx /var/lib/libvirt/images kcli create network -c 192.168.122.0/24 default 1.8.12.5.1.2. Enabling the network manager dispatcher Enable the network manager dispatcher to ensure that virtual machines can resolve the required domains, routes, and registries. To enable the network manager dispatcher, in the /etc/NetworkManager/dispatcher.d/ directory, create a script named forcedns that contains the following content, replacing values as necessary to match your environment: #!/bin/bash export IP="192.168.126.1" 1 export BASE_RESOLV_CONF="/run/NetworkManager/resolv.conf" if ! [[ `grep -q "USDIP" /etc/resolv.conf` ]]; then export TMP_FILE=USD(mktemp /etc/forcedns_resolv.conf.XXXXXX) cp USDBASE_RESOLV_CONF USDTMP_FILE chmod --reference=USDBASE_RESOLV_CONF USDTMP_FILE sed -i -e "s/dns.base.domain.name//" -e "s/search /& dns.base.domain.name /" -e "0,/nameserver/s/nameserver/& USDIP\n&/" USDTMP_FILE 2 mv USDTMP_FILE /etc/resolv.conf fi echo "ok" 1 Modify the IP variable to point to the IP address of the hypervisor interface that hosts the OpenShift Container Platform management cluster. 2 Replace dns.base.domain.name with the DNS base domain name. After you create the file, add permissions by entering the following command: chmod 755 /etc/NetworkManager/dispatcher.d/forcedns Run the script and verify that the output returns ok . 1.8.12.5.1.3. Configure BMC access Configure ksushy to simulate baseboard management controllers (BMCs) for the virtual machines. Enter the following commands: sudo dnf install python3-pyOpenSSL.noarch python3-cherrypy -y kcli create sushy-service --ssl --port 9000 sudo systemctl daemon-reload systemctl enable --now ksushy Test whether the service is correctly functioning by entering the following command: systemctl status ksushy 1.8.12.5.1.4. Configuring the hypervisor system to allow connections If you are working in a development environment, configure the hypervisor system to allow various types of connections through different virtual networks within the environment. Note: If you are working in a production environment, you must establish proper rules for the firewalld service and configure SELinux policies to maintain a secure environment. For SELinux, enter the following command: sed -i s/^SELINUX=.*USD/SELINUX=permissive/ /etc/selinux/config; setenforce 0 For firewalld , enter the following command: systemctl disable --now firewalld For libvirtd , enter the following commands: systemctl restart libvirtd systemctl enable --now libvirtd , configure DNS for your environment. 1.8.12.5.1.5. Additional resources For more information about kcli , see the official kcli documentation . 1.8.12.5.2. Configuring DNS for an IPv4 network This step is mandatory for both disconnected and connected environments in both virtual and bare metal environments. The key distinction between virtual and bare metal environment lies in the location where you configure the resources. In a bare-metal environment, use a solution like Bind rather than a lightweight solution like dnsmasq . To configure DNS for an IPv4 network on a virtual environment, see Default ingress and DNS behavior . To configure DNS for an IPv4 network on bare metal, see Configuring DNS on bare metal . , deploy a registry. 1.8.12.5.3. Deploying a registry for an IPv4 network For development environments, deploy a small, self-hosted registry by using a Podman container. For production environments, use an enterprise-hosted registry, such as Red Hat Quay, Nexus, or Artifactory. To deploy a small registry by using Podman, complete the following steps: As a privileged user, access the USD{HOME} directory and create the following script: #!/usr/bin/env bash set -euo pipefail PRIMARY_NIC=USD(ls -1 /sys/class/net | grep -v podman | head -1) export PATH=/root/bin:USDPATH export PULL_SECRET="/root/baremetal/hub/openshift_pull.json" 1 if [[ ! -f USDPULL_SECRET ]];then echo "Pull Secret not found, exiting..." exit 1 fi dnf -y install podman httpd httpd-tools jq skopeo libseccomp-devel export IP=USD(ip -o addr show USDPRIMARY_NIC | head -1 | awk '{print USD4}' | cut -d'/' -f1) REGISTRY_NAME=registry.USD(hostname --long) REGISTRY_USER=dummy REGISTRY_PASSWORD=dummy KEY=USD(echo -n USDREGISTRY_USER:USDREGISTRY_PASSWORD | base64) echo "{\"auths\": {\"USDREGISTRY_NAME:5000\": {\"auth\": \"USDKEY\", \"email\": \"[email protected]\"}}}" > /root/disconnected_pull.json mv USD{PULL_SECRET} /root/openshift_pull.json.old jq ".auths += {\"USDREGISTRY_NAME:5000\": {\"auth\": \"USDKEY\",\"email\": \"[email protected]\"}}" < /root/openshift_pull.json.old > USDPULL_SECRET mkdir -p /opt/registry/{auth,certs,data,conf} cat <<EOF > /opt/registry/conf/config.yml version: 0.1 log: fields: service: registry storage: cache: blobdescriptor: inmemory filesystem: rootdirectory: /var/lib/registry delete: enabled: true http: addr: :5000 headers: X-Content-Type-Options: [nosniff] health: storagedriver: enabled: true interval: 10s threshold: 3 compatibility: schema1: enabled: true EOF openssl req -newkey rsa:4096 -nodes -sha256 -keyout /opt/registry/certs/domain.key -x509 -days 3650 -out /opt/registry/certs/domain.crt -subj "/C=US/ST=Madrid/L=San Bernardo/O=Karmalabs/OU=Guitar/CN=USDREGISTRY_NAME" -addext "subjectAltName=DNS:USDREGISTRY_NAME" cp /opt/registry/certs/domain.crt /etc/pki/ca-trust/source/anchors/ update-ca-trust extract htpasswd -bBc /opt/registry/auth/htpasswd USDREGISTRY_USER USDREGISTRY_PASSWORD podman create --name registry --net host --security-opt label=disable --replace -v /opt/registry/data:/var/lib/registry:z -v /opt/registry/auth:/auth:z -v /opt/registry/conf/config.yml:/etc/docker/registry/config.yml -e "REGISTRY_AUTH=htpasswd" -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry" -e "REGISTRY_HTTP_SECRET=ALongRandomSecretForRegistry" -e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd -v /opt/registry/certs:/certs:z -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key docker.io/library/registry:latest [ "USD?" == "0" ] || !! systemctl enable --now registry 1 Replace the location of the PULL_SECRET with the appropriate location for your setup. Name the script file registry.sh and save it. When you run the script, it pulls in the following information: The registry name, based on the hypervisor hostname The necessary credentials and user access details Adjust permissions by adding the execution flag as follows: To run the script without any parameters, enter the following command: The script starts the server. The script uses a systemd service for management purposes. If you need to manage the script, you can use the following commands: The root folder for the registry is in the /opt/registry directory and contains the following subdirectories: certs contains the TLS certificates. auth contains the credentials. data contains the registry images. conf contains the registry configuration. 1.8.12.5.4. Setting up the management cluster for an IPv4 network To set up an OpenShift Container Platform management cluster, you can use dev-scripts, or if you are based on virtual machines, you can use the kcli tool. The following instructions are specific to the kcli tool. Ensure that the right networks are prepared for use in the hypervisor. The networks will host both the management and hosted clusters. Enter the following kcli command: where: -c specifies the CIDR for the network. -P dhcp=false configures the network to disable the DHCP, which is handled by the dnsmasq that you configured. -P dns=false configures the network to disable the DNS, which is also handled by the dnsmasq that you configured. --domain sets the domain to search. dns.base.domain.name is the DNS base domain name. ipv4 is the name of the network that you are creating. After the network is created, review the following output: Ensure that the pull secret and kcli plan files are in place so that you can deploy the OpenShift Container Platform management cluster: Confirm that the pull secret is in the same folder as the kcli plan, and that the pull secret file is named openshift_pull.json . Add the kcli plan, which contains the OpenShift Container Platform definition, in the mgmt-compact-hub-ipv4.yaml file. Ensure that you update the file contents to match your environment: plan: hub-ipv4 force: true version: nightly tag: "4.x.y-x86_64" 1 cluster: "hub-ipv4" domain: dns.base.domain.name api_ip: 192.168.125.10 ingress_ip: 192.168.125.11 disconnected_url: registry.dns.base.domain.name:5000 disconnected_update: true disconnected_user: dummy disconnected_password: dummy disconnected_operators_version: v4.14 disconnected_operators: - name: metallb-operator - name: lvms-operator channels: - name: stable-4.14 disconnected_extra_images: - quay.io/user-name/trbsht:latest - quay.io/user-name/hypershift:BMSelfManage-v4.14-rc-v3 - registry.redhat.io/openshift4/ose-kube-rbac-proxy:v4.10 dualstack: false disk_size: 200 extra_disks: [200] memory: 48000 numcpus: 16 ctlplanes: 3 workers: 0 manifests: extra-manifests metal3: true network: ipv4 users_dev: developer users_devpassword: developer users_admin: admin users_adminpassword: admin metallb_pool: ipv4-virtual-network metallb_ranges: - 192.168.125.150-192.168.125.190 metallb_autoassign: true apps: - users - lvms-operator - metallb-operator vmrules: - hub-bootstrap: nets: - name: ipv4 mac: aa:aa:aa:aa:02:10 - hub-ctlplane-0: nets: - name: ipv4 mac: aa:aa:aa:aa:02:01 - hub-ctlplane-1: nets: - name: ipv4 mac: aa:aa:aa:aa:02:02 - hub-ctlplane-2: nets: - name: ipv4 mac: aa:aa:aa:aa:02:03 1 Replace 4.x.y with the supported OpenShift Container Platform version you want to use. To provision the management cluster, enter the following command: 1.8.12.5.4.1. Additional resources For more information about the parameters in the kcli plan file, see Create a parameters.yml in the official kcli documentation. 1.8.12.5.5. Configuring the web server for an IPv4 network You need to configure an additional web server to host the Red Hat Enterprise Linux CoreOS (RHCOS) images that are associated with the OpenShift Container Platform release that you are deploying as a hosted cluster. To configure the web server, complete the following steps: Extract the openshift-install binary from the OpenShift Container Platform release that you want to use by entering the following command: oc adm -a USD{LOCAL_SECRET_JSON} release extract --command=openshift-install "USD{LOCAL_REGISTRY}/USD{LOCAL_REPOSITORY}:USD{OCP_RELEASE}-USD{ARCHITECTURE}" Run the following script. The script creates a folder in the /opt/srv directory. The folder contains the RHCOS images to provision the worker nodes. #!/bin/bash WEBSRV_FOLDER=/opt/srv ROOTFS_IMG_URL="USD(./openshift-install coreos print-stream-json | jq -r '.architectures.x86_64.artifacts.metal.formats.pxe.rootfs.location')" 1 LIVE_ISO_URL="USD(./openshift-install coreos print-stream-json | jq -r '.architectures.x86_64.artifacts.metal.formats.iso.disk.location')" 2 mkdir -p USD{WEBSRV_FOLDER}/images curl -Lk USD{ROOTFS_IMG_URL} -o USD{WEBSRV_FOLDER}/images/USD{ROOTFS_IMG_URL##*/} curl -Lk USD{LIVE_ISO_URL} -o USD{WEBSRV_FOLDER}/images/USD{LIVE_ISO_URL##*/} chmod -R 755 USD{WEBSRV_FOLDER}/* ## Run Webserver podman ps --noheading | grep -q websrv-ai if [[ USD? == 0 ]];then echo "Launching Registry pod..." /usr/bin/podman run --name websrv-ai --net host -v /opt/srv:/usr/local/apache2/htdocs:z quay.io/alosadag/httpd:p8080 fi 1 You can find the ROOTFS_IMG_URL value on the OpenShift CI Release page. 2 You can find the LIVE_ISO_URL value on the OpenShift CI Release page. After the download is completed, a container runs to host the images on a web server. The container uses a variation of the official HTTPd image, which also enables it to work with IPv6 networks. 1.8.12.5.6. Configuring image mirroring for an IPv4 network Image mirroring is the process of fetching images from external registries, such as registry.redhat.com or quay.io , and storing them in your private registry. 1.8.12.5.6.1. Completing the mirroring process Note: Start the mirroring process after the registry server is running. In the following procedures, the oc-mirror tool is used, which is a binary that uses the ImageSetConfiguration object. In the file, you can specify the following information: The OpenShift Container Platform versions to mirror. The versions are in quay.io . The additional Operators to mirror. Select packages individually. The extra images that you want to add to the repository. To configure image mirroring, complete the following steps: Ensure that your USD{HOME}/.docker/config.json file is updated with the registries that you are going to mirror from and with the private registry that you plan to push the images to. By using the following example, create an ImageSetConfiguration object to use for mirroring. Replace values as needed to match your environment: 1 Replace dns.base.domain.name with the DNS base domain name. 2 Replace 4.x.y with the supported OpenShift Container Platform version you want to use. Start the mirroring process by entering the following command: After the mirroring process is finished, you have a new folder named oc-mirror-workspace/results-XXXXXX/ , which contains the ICSP and the catalog sources to apply on the hosted cluster. Mirror the nightly or CI versions of OpenShift Container Platform by using the oc adm release mirror command. Enter the following command: Replace 4.x.y with the supported OpenShift Container Platform version you want to use. Mirror the latest multicluster engine operator images by following the steps in Install on disconnected networks . 1.8.12.5.6.2. Applying objects in the management cluster After the mirroring process is complete, you need to apply two objects in the management cluster: Image Content Source Policies (ICSP) or Image Digest Mirror Set (IDMS) Catalog sources When you use the oc-mirror tool, the output artifacts are in a folder named oc-mirror-workspace/results-XXXXXX/ . ICSP or IDMS initiates a MachineConfig change that does not restart your nodes but restarts the kubelet on each of them. After the nodes are marked as READY , you need to apply the newly generated catalog sources. The catalog sources initiate actions in the openshift-marketplace Operator, such as downloading the catalog image and processing it to retrieve all the PackageManifests that are included in that image. To check the new sources, run the following command by using the new CatalogSource as a source: To apply the artifacts, complete the following steps: Create the ImageContentSourcePolicy (ICSP) or IDMS artifacts by entering the following command: Wait for the nodes to become ready, and then enter the following command: Mirror the OLM catalogs and configure the hosed cluster to point to the mirror. When you use the management (default) OLMCatalogPlacement mode, the image stream that is used for OLM catalogs is not automatically amended with override information from the ICSP on the management cluster. If the OLM catalogs are properly mirrored to an internal registry by using the original name and tag, add the hypershift.openshift.io/olm-catalogs-is-registry-overrides annotation to the HostedCluster resource. The format is "sr1=dr1,sr2=dr2" , where the source registry string is a key and the destination registry is a value. To bypass the OLM catalog image stream mechanism, use the following four annotations on the HostedCluster resource to directly specify the addresses of the four images to use for OLM operator catalogs: hypershift.openshift.io/certified-operators-catalog-image hypershift.openshift.io/community-operators-catalog-image hypershift.openshift.io/redhat-marketplace-catalog-image hypershift.openshift.io/redhat-operators-catalog-image In this case, the image stream is not created, and you must update the value of the annotations when the internal mirror is refreshed to pull in operator updates. Note: If the override mechanism is required, all four values for the four default catalog sources are needed. 1.8.12.5.6.3. Additional resources If you are working in a virtual environment, after you configure mirroring, ensure that you meet the prerequisites for hosted control planes on OpenShift Virtualization . For more information about mirroring nightly or CI versions of OpenShift Container Platform, see Mirroring images for a disconnected installation using the oc-mirror plugin . 1.8.12.5.7. Deploying multicluster engine operator for an IPv4 network The multicluster engine operator plays a crucial role in deploying clusters across providers. If you already installed Red Hat Advanced Cluster Management, you do not need to install multicluster engine operator because it is automatically installed. If you do not have multicluster engine operator installed, review the following documentation to understand the prerequisites and steps to install it: About cluster lifecycle with multicluster engine operator Installing and upgrading multicluster engine operator 1.8.12.5.7.1. Deploying AgentServiceConfig resources The AgentServiceConfig custom resource is an essential component of the Assisted Service add-on that is part of multicluster engine operator. It is responsible for bare metal cluster deployment. When the add-on is enabled, you deploy the AgentServiceConfig resource to configure the add-on. In addition to configuring the AgentServiceConfig resource, you need to include additional config maps to ensure that multicluster engine operator functions properly in a disconnected environment. Configure the custom registries by adding the following config map, which contains the disconnected details to customize the deployment: --- apiVersion: v1 kind: ConfigMap metadata: name: custom-registries namespace: multicluster-engine labels: app: assisted-service data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- -----END CERTIFICATE----- registries.conf: | unqualified-search-registries = ["registry.access.redhat.com", "docker.io"] [[registry]] prefix = "" location = "registry.redhat.io/openshift4" mirror-by-digest-only = true [[registry.mirror]] location = "registry.dns.base.domain.name:5000/openshift4" 1 [[registry]] prefix = "" location = "registry.redhat.io/rhacm2" mirror-by-digest-only = true ... ... 1 Replace dns.base.domain.name with the DNS base domain name. The object contains two fields: Custom CAs: This field contains the Certificate Authorities (CAs) that are loaded into the various processes of the deployment. Registries: The Registries.conf field contains information about images and namespaces that need to be consumed from a mirror registry rather than the original source registry. Configure the Assisted Service by adding the AssistedServiceConfig object, as shown in the following example: --- apiVersion: agent-install.openshift.io/v1beta1 kind: AgentServiceConfig metadata: annotations: unsupported.agent-install.openshift.io/assisted-service-configmap: assisted-service-config 1 name: agent namespace: multicluster-engine spec: mirrorRegistryRef: name: custom-registries 2 databaseStorage: storageClassName: lvms-vg1 accessModes: - ReadWriteOnce resources: requests: storage: 10Gi filesystemStorage: storageClassName: lvms-vg1 accessModes: - ReadWriteOnce resources: requests: storage: 20Gi osImages: 3 - cpuArchitecture: x86_64 openshiftVersion: "4.14" rootFSUrl: http://registry.dns.base.domain.name:8080/images/rhcos-414.92.202308281054-0-live-rootfs.x86_64.img 4 url: http://registry.dns.base.domain.name:8080/images/rhcos-414.92.202308281054-0-live.x86_64.iso version: 414.92.202308281054-0 1 The metadata.annotations["unsupported.agent-install.openshift.io/assisted-service-configmap"] annotation references the config map name that the Operator consumes to customize behavior. 2 The spec.mirrorRegistryRef.name annotation points to the config map that contains disconnected registry information that the Assisted Service Operator consumes. This config map adds those resources during the deployment process. 3 The spec.osImages field contains different versions available for deployment by this Operator. This field is mandatory. This example assumes that you already downloaded the RootFS and LiveISO files. 4 In the rootFSUrl and url fields, replace dns.base.domain.name with the DNS base domain name. Deploy all of the objects by concatenating them into a single file and applying them to the management cluster. To do so, enter the following command: The command triggers two pods, as shown in this example output: 1 The assisted-image-service pod is responsible for creating the Red Hat Enterprise Linux CoreOS (RHCOS) boot image template, which is customized for each cluster that you deploy. 2 The assisted-service refers to the Operator. 1.8.12.5.8. Configuring TLS certificates for an IPv4 network Several TLS certificates are involved in the process to configure hosted control planes in a disconnected environment. To add a Certificate Authority (CA) to the management cluster, you need to modify the content of the following files in the OpenShift Container Platform control plane and worker nodes: /etc/pki/ca-trust/extracted/pem/ /etc/pki/ca-trust/source/anchors /etc/pki/tls/certs/ To add a CA to the management cluster, complete the following steps: Complete the steps in Updating the CA bundle in the official OpenShift Container Platform documentation. That method involves using the image-registry-operator , which deploys the CAs to the OpenShift Container Platform nodes. If that method does not apply to your situation, check whether the openshift-config namespace in the management cluster contains a config map named user-ca-bundle . If the namespace contains that config map, enter the following command: If the namespace does not contain that config map, enter the following command: 1.8.12.5.9. Deploying the hosted cluster for an IPv4 network A hosted cluster is an OpenShift Container Platform cluster with its control plane and API endpoint hosted on a management cluster. The hosted cluster includes the control plane and its corresponding data plane. Although you can use the console in Red Hat Advanced Cluster Management to create a hosted cluster, the following procedures use manifests, which provide more flexibility for modifying the related artifacts. 1.8.12.5.9.1. Deploying hosted cluster objects For the purposes of this procedure, the following values are used: HostedCluster name: hosted-ipv4 HostedCluster namespace: clusters Disconnected: true Network stack: IPv4 Typically, the HyperShift Operator creates the HostedControlPlane namespace. However, in this case, you want to include all the objects before the HyperShift Operator begins to reconcile the HostedCluster object. Then, when the Operator starts the reconciliation process, it can find all of the objects in place. Create a YAML file with the following information about the namespaces: --- apiVersion: v1 kind: Namespace metadata: creationTimestamp: null name: clusters-hosted-ipv4 spec: {} status: {} --- apiVersion: v1 kind: Namespace metadata: creationTimestamp: null name: clusters spec: {} status: {} Create a YAML file with the following information about the config maps and secrets to include in the HostedCluster deployment: --- apiVersion: v1 data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- -----END CERTIFICATE----- kind: ConfigMap metadata: name: user-ca-bundle namespace: clusters --- apiVersion: v1 data: .dockerconfigjson: xxxxxxxxx kind: Secret metadata: creationTimestamp: null name: hosted-ipv4-pull-secret namespace: clusters --- apiVersion: v1 kind: Secret metadata: name: sshkey-cluster-hosted-ipv4 namespace: clusters stringData: id_rsa.pub: ssh-rsa xxxxxxxxx --- apiVersion: v1 data: key: nTPtVBEt03owkrKhIdmSW8jrWRxU57KO/fnZa8oaG0Y= kind: Secret metadata: creationTimestamp: null name: hosted-ipv4-etcd-encryption-key namespace: clusters type: Opaque Create a YAML file that contains the RBAC roles so that Assisted Service agents can be in the same HostedControlPlane namespace as the hosted control plane and still be managed by the cluster API: apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: creationTimestamp: null name: capi-provider-role namespace: clusters-hosted-ipv4 rules: - apiGroups: - agent-install.openshift.io resources: - agents verbs: - '*' Create a YAML file with information about the HostedCluster object, replacing values as necessary: apiVersion: hypershift.openshift.io/v1beta1 kind: HostedCluster metadata: name: hosted-ipv4 namespace: clusters spec: additionalTrustBundle: name: "user-ca-bundle" olmCatalogPlacement: guest imageContentSources: 1 - source: quay.io/openshift-release-dev/ocp-v4.0-art-dev mirrors: - registry.dns.base.domain.name:5000/openshift/release - source: quay.io/openshift-release-dev/ocp-release mirrors: - registry.dns.base.domain.name:5000/openshift/release-images - mirrors: ... ... autoscaling: {} controllerAvailabilityPolicy: SingleReplica dns: baseDomain: dns.base.domain.name etcd: managed: storage: persistentVolume: size: 8Gi restoreSnapshotURL: null type: PersistentVolume managementType: Managed fips: false networking: clusterNetwork: - cidr: 10.132.0.0/14 networkType: OVNKubernetes serviceNetwork: - cidr: 172.31.0.0/16 platform: agent: agentNamespace: clusters-hosted-ipv4 type: Agent pullSecret: name: hosted-ipv4-pull-secret release: image: registry.dns.base.domain.name:5000/openshift/release-images:4.x.y-x86_64 secretEncryption: aescbc: activeKey: name: hosted-ipv4-etcd-encryption-key type: aescbc services: - service: APIServer servicePublishingStrategy: nodePort: address: api.hosted-ipv4.dns.base.domain.name type: NodePort - service: OAuthServer servicePublishingStrategy: nodePort: address: api.hosted-ipv4.dns.base.domain.name type: NodePort - service: OIDC servicePublishingStrategy: nodePort: address: api.hosted-ipv4.dns.base.domain.name type: NodePort - service: Konnectivity servicePublishingStrategy: nodePort: address: api.hosted-ipv4.dns.base.domain.name type: NodePort - service: Ignition servicePublishingStrategy: nodePort: address: api.hosted-ipv4.dns.base.domain.name type: NodePort sshKey: name: sshkey-cluster-hosted-ipv4 status: controlPlaneEndpoint: host: "" port: 0 where dns.base.domain.name is the DNS base domain name and 4.x.y is the supported OpenShift Container Platform version you want to use. 1 The imageContentSources section contains mirror references for user workloads within the hosted cluster. Add an annotation in the HostedCluster object that points to the HyperShift Operator release in the OpenShift Container Platform release: Obtain the image payload by entering the following command: where dns.base.domain.name is the DNS base domain name and 4.x.y is the supported OpenShift Container Platform version you want to use. See the following output: By using the OpenShift Container Platform Images namespace, check the digest by entering the following command: where dns.base.domain.name is the DNS base domain name. See the following output: Note: The release image that is set in the HostedCluster object must use the digest rather than the tag; for example, quay.io/openshift-release-dev/ocp-release@sha256:e3ba11bd1e5e8ea5a0b36a75791c90f29afb0fdbe4125be4e48f69c76a5c47a0 . Create all of the objects that you defined in the YAML files by concatenating them into a file and applying them against the management cluster. To do so, enter the following command: See the output for the hosted control plane: See the output for the hosted cluster: , create a NodePool object. 1.8.12.5.9.2. Creating a NodePool object for the hosted cluster A NodePool is a scalable set of worker nodes that is associated with a hosted cluster. NodePool machine architectures remain consistent within a specific pool and are independent of the machine architecture of the control plane. Create a YAML file with the following information about the NodePool object, replacing values as necessary: apiVersion: hypershift.openshift.io/v1beta1 kind: NodePool metadata: creationTimestamp: null name: hosted-ipv4 namespace: clusters spec: arch: amd64 clusterName: hosted-ipv4 management: autoRepair: false 1 upgradeType: InPlace 2 nodeDrainTimeout: 0s platform: type: Agent release: image: registry.dns.base.domain.name:5000/openshift/release-images:4.x.y-x86_64 3 replicas: 0 status: replicas: 0 4 1 The autoRepair field is set to false because the node will not be re-created if it is removed. 2 The upgradeType is set to InPlace , which indicates that the same bare metal node is reused during an upgrade. 3 All of the nodes included in this NodePool are based on the following OpenShift Container Platform version: 4.x.y-x86_64 . Replace dns.base.domain.name with the DNS base domain name and 4.x.y with the supported OpenShift Container Platform version you want to use. 4 The replicas value is set to 0 so that you can scale them when needed. It is important to keep the NodePool replicas at 0 until all steps are completed. Create the NodePool object by entering the following command: See the output: Replace 4.x.y with the supported OpenShift Container Platform version you want to use. , create an InfraEnv resource. 1.8.12.5.9.3. Creating an InfraEnv resource for the hosted cluster The InfraEnv resource is an Assisted Service object that includes essential details, such as the pullSecretRef and the sshAuthorizedKey . Those details are used to create the Red Hat Enterprise Linux CoreOS (RHCOS) boot image that is customized for the hosted cluster. Create a YAML file with the following information about the InfraEnv resource, replacing values as necessary: --- apiVersion: agent-install.openshift.io/v1beta1 kind: InfraEnv metadata: name: hosted-ipv4 namespace: clusters-hosted-ipv4 spec: pullSecretRef: 1 name: pull-secret sshAuthorizedKey: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDk7ICaUE+/k4zTpxLk4+xFdHi4ZuDi5qjeF52afsNkw0w/glILHhwpL5gnp5WkRuL8GwJuZ1VqLC9EKrdmegn4MrmUlq7WTsP0VFOZFBfq2XRUxo1wrRdor2z0Bbh93ytR+ZsDbbLlGngXaMa0Vbt+z74FqlcajbHTZ6zBmTpBVq5RHtDPgKITdpE1fongp7+ZXQNBlkaavaqv8bnyrP4BWahLP4iO9/xJF9lQYboYwEEDzmnKLMW1VtCE6nJzEgWCufACTbxpNS7GvKtoHT/OVzw8ArEXhZXQUS1UY8zKsX2iXwmyhw5Sj6YboA8WICs4z+TrFP89LmxXY0j6536TQFyRz1iB4WWvCbH5n6W+ABV2e8ssJB1AmEy8QYNwpJQJNpSxzoKBjI73XxvPYYC/IjPFMySwZqrSZCkJYqQ023ySkaQxWZT7in4KeMu7eS2tC+Kn4deJ7KwwUycx8n6RHMeD8Qg9flTHCv3gmab8JKZJqN3hW1D378JuvmIX4V0= 2 1 The pullSecretRef refers to the config map reference in the same namespace as the InfraEnv , where the pull secret is used. 2 The sshAuthorizedKey represents the SSH public key that is placed in the boot image. The SSH key allows access to the worker nodes as the core user. Create the InfraEnv resource by entering the following command: See the following output: , create worker nodes. 1.8.12.5.9.4. Creating worker nodes for the hosted cluster If you are working on a bare metal platform, creating worker nodes is crucial to ensure that the details in the BareMetalHost are correctly configured. If you are working with virtual machines, you can complete the following steps to create empty worker nodes that the Metal3 Operator consumes. To do so, you use kcli . If this is not your first attempt to create worker nodes, you must first delete your setup. To do so, delete the plan by entering the following command: When you are prompted to confirm whether you want to delete the plan, type y . Confirm that you see a message stating that the plan was deleted. Create the virtual machines by entering the following commands: where: start=False means that the virtual machine (VM) will not automatically start upon creation. uefi_legacy=true means that you will use UEFI legacy boot to ensure compatibility with UEFI implementations. plan=hosted-dual indicates the plan name, which identifies a group of machines as a cluster. memory=8192 and numcpus=16 are parameters that specify the resources for the VM, including the RAM and CPU. disks=[200,200] indicates that you are creating two thin-provisioned disks in the VM. nets=[{"name": "dual", "mac": "aa:aa:aa:aa:02:13"}] are network details, including the network name to connect to and the MAC address of the primary interface. restart ksushy restarts the ksushy tool to ensure that the tool detects the VMs that you added. See the resulting output: , create bare metal hosts for the hosted cluster. 1.8.12.5.9.5. Creating bare metal hosts for the hosted cluster A bare metal host is an openshift-machine-api object that encompasses physical and logical details so that it can be identified by a Metal3 Operator. Those details are associated with other Assisted Service objects, known as agents . Important: Before you create the bare metal host and destination nodes, you must create the virtual machines. To create a bare metal host, complete the following steps: Create a YAML file with the following information: Note: Because you have at least one secret that holds the bare metal host credentials, you need to create at least two objects for each worker node. --- apiVersion: v1 kind: Secret metadata: name: hosted-ipv4-worker0-bmc-secret namespace: clusters-hosted-ipv4 data: password: YWRtaW4= username: YWRtaW4= type: Opaque --- apiVersion: metal3.io/v1alpha1 kind: BareMetalHost metadata: name: hosted-ipv4-worker0 namespace: clusters-hosted-ipv4 labels: infraenvs.agent-install.openshift.io: hosted-ipv4 1 annotations: inspect.metal3.io: disabled bmac.agent-install.openshift.io/hostname: hosted-ipv4-worker0 2 spec: automatedCleaningMode: disabled 3 bmc: disableCertificateVerification: true 4 address: redfish-virtualmedia://[192.168.125.1]:9000/redfish/v1/Systems/local/hosted-ipv4-worker0 5 credentialsName: hosted-ipv4-worker0-bmc-secret 6 bootMACAddress: aa:aa:aa:aa:02:11 7 online: true 8 1 infraenvs.agent-install.openshift.io serves as the link between the Assisted Installer and the BareMetalHost objects. 2 bmac.agent-install.openshift.io/hostname represents the node name that is adopted during deployment. 3 automatedCleaningMode prevents the node from being erased by the Metal3 Operator. 4 disableCertificateVerification is set to true to bypass certificate validation from the client. 5 address denotes the baseboard management controller (BMC) address of the worker node. 6 credentialsName points to the secret where the user and password credentials are stored. 7 bootMACAddress indicates the interface MACAddress that the node starts from. 8 online defines the state of the node after the BareMetalHost object is created. Deploy the BareMetalHost object by entering the following command: During the process, you can view the following output: This output indicates that the process is trying to reach the nodes: This output indicates that the nodes are starting: This output indicates that the nodes started successfully: After the nodes start, notice the agents in the namespace, as shown in this example: The agents represent nodes that are available for installation. To assign the nodes to a hosted cluster, scale up the node pool. 1.8.12.5.9.6. Scaling up the node pool After you create the bare metal hosts, their statuses change from Registering to Provisioning to Provisioned . The nodes start with the LiveISO of the agent and a default pod that is named agent . That agent is responsible for receiving instructions from the Assisted Service Operator to install the OpenShift Container Platform payload. To scale up the node pool, enter the following command: After the scaling process is complete, notice that the agents are assigned to a hosted cluster: Also notice that the node pool replicas are set: Replace 4.x.y with the supported OpenShift Container Platform version you want to use. Wait until the nodes join the cluster. During the process, the agents provide updates on their stage and status. , monitor the deployment of the hosted cluster. 1.8.12.5.10. Finishing the hosted cluster deployment for an IPv4 network You can monitor the deployment of a hosted cluster from two perspectives: the control plane and the data plane. 1.8.12.5.10.1. Monitoring the control plane While the hosted cluster is deploying, you can enter the following commands to monitor the control plane: Those commands provide information about the following artifacts: The HyperShift Operator The HostedControlPlane pod The bare metal hosts The agents The InfraEnv resource The HostedCluster and NodePool resources 1.8.12.5.10.2. Monitoring the data plane To monitor how the Operators are progressing during the deployment process, enter the following commands: Those commands provide information about the following artifacts: The cluster version The nodes, specifically, about whether the nodes joined the cluster The cluster Operators 1.8.12.6. Configuring hosted control planes on an IPv6 network The IPv6 network configuration is currently designated as disconnected. The primary reason for this designation is because remote registries do not function with IPv6. The process to configure hosted control planes on an IPv6 network involves the following steps, which are described in detail in the sections that follow: Configure the hypervisor for an IPv6 network Configuring DNS for an IPv6 network Deploying a registry for an IPv6 network Setting up a management cluster for an IPv6 network Configuring the web server for an IPv6 network Configuring image mirroring for an IPv6 network Deploying multicluster engine operator for an IPv6 network Configuring TLS certificates for an IPv6 network Deploying the hosted cluster for an IPv6 network Finishing the deployment for an IPv6 network 1.8.12.6.1. Configuring the hypervisor for an IPv6 network The following information applies to virtual machine environments only. 1.8.12.6.1.1. Accessing and deploying packages for a virtual OpenShift Container Platform cluster To deploy a virtual OpenShift Container Platform management cluster, access the required packages by entering the following command: sudo dnf install dnsmasq radvd vim golang podman bind-utils net-tools httpd-tools tree htop strace tmux -y Enable and start the Podman service by entering the following command: systemctl enable --now podman To use kcli to deploy the OpenShift Container Platform management cluster and other virtual components, install and configure the hypervisor by entering the following commands: sudo yum -y install libvirt libvirt-daemon-driver-qemu qemu-kvm sudo usermod -aG qemu,libvirt USD(id -un) sudo newgrp libvirt sudo systemctl enable --now libvirtd sudo dnf -y copr enable karmab/kcli sudo dnf -y install kcli sudo kcli create pool -p /var/lib/libvirt/images default kcli create host kvm -H 127.0.0.1 local sudo setfacl -m u:USD(id -un):rwx /var/lib/libvirt/images kcli create network -c 192.168.122.0/24 default 1.8.12.6.1.2. Enabling the network manager dispatcher Enable the network manager dispatcher to ensure that virtual machines can resolve the required domains, routes, and registries. To enable the network manager dispatcher, in the /etc/NetworkManager/dispatcher.d/ directory, create a script named forcedns that contains the following content, replacing values as necessary to match your environment: #!/bin/bash export IP="2620:52:0:1306::1" 1 export BASE_RESOLV_CONF="/run/NetworkManager/resolv.conf" if ! [[ `grep -q "USDIP" /etc/resolv.conf` ]]; then export TMP_FILE=USD(mktemp /etc/forcedns_resolv.conf.XXXXXX) cp USDBASE_RESOLV_CONF USDTMP_FILE chmod --reference=USDBASE_RESOLV_CONF USDTMP_FILE sed -i -e "s/dns.base.domain.name//" -e "s/search /& dns.base.domain.name /" -e "0,/nameserver/s/nameserver/& USDIP\n&/" USDTMP_FILE 2 mv USDTMP_FILE /etc/resolv.conf fi echo "ok" 1 Modify the IP variable to point to the IP address of the hypervisor interface that hosts the OpenShift Container Platform management cluster. 2 Replace dns.base.domain.name with the DNS base domain name. After you create the file, add permissions by entering the following command: chmod 755 /etc/NetworkManager/dispatcher.d/forcedns Run the script and verify that the output returns ok . 1.8.12.6.1.3. Configure BMC access Configure ksushy to simulate baseboard management controllers (BMCs) for the virtual machines. Enter the following commands: sudo dnf install python3-pyOpenSSL.noarch python3-cherrypy -y kcli create sushy-service --ssl --ipv6 --port 9000 sudo systemctl daemon-reload systemctl enable --now ksushy Test whether the service is correctly functioning by entering the following command: systemctl status ksushy 1.8.12.6.1.4. Configuring the hypervisor system to allow connections If you are working in a development environment, configure the hypervisor system to allow various types of connections through different virtual networks within the environment. Note: If you are working in a production environment, you must establish proper rules for the firewalld service and configure SELinux policies to maintain a secure environment. For SELinux, enter the following command: sed -i s/^SELINUX=.*USD/SELINUX=permissive/ /etc/selinux/config; setenforce 0 For firewalld , enter the following command: systemctl disable --now firewalld For libvirtd , enter the following commands: systemctl restart libvirtd systemctl enable --now libvirtd , configure DNS for your environment. 1.8.12.6.1.5. Additional resources For more information about kcli , see the official kcli documentation . 1.8.12.6.2. Configuring DNS for an IPv6 network This step is mandatory for both disconnected and connected environments in both virtual and bare metal environments. The key distinction between virtual and bare metal environment lies in the location where you configure the resources. In a bare-metal environment, use a solution like Bind rather than a lightweight solution like dnsmasq . To configure DNS for an IPv6 network on a virtual environment, see Default ingress and DNS behavior . To configure DNS for an IPv6 network on bare metal, see Configuring DNS on bare metal . , deploy a registry. 1.8.12.6.3. Deploying a registry for an IPv6 network For development environments, deploy a small, self-hosted registry by using a Podman container. For production environments, use an enterprise-hosted registry, such as Red Hat Quay, Nexus, or Artifactory. To deploy a small registry by using Podman, complete the following steps: As a privileged user, access the USD{HOME} directory and create the following script: #!/usr/bin/env bash set -euo pipefail PRIMARY_NIC=USD(ls -1 /sys/class/net | grep -v podman | head -1) export PATH=/root/bin:USDPATH export PULL_SECRET="/root/baremetal/hub/openshift_pull.json" 1 if [[ ! -f USDPULL_SECRET ]];then echo "Pull Secret not found, exiting..." exit 1 fi dnf -y install podman httpd httpd-tools jq skopeo libseccomp-devel export IP=USD(ip -o addr show USDPRIMARY_NIC | head -1 | awk '{print USD4}' | cut -d'/' -f1) REGISTRY_NAME=registry.USD(hostname --long) REGISTRY_USER=dummy REGISTRY_PASSWORD=dummy KEY=USD(echo -n USDREGISTRY_USER:USDREGISTRY_PASSWORD | base64) echo "{\"auths\": {\"USDREGISTRY_NAME:5000\": {\"auth\": \"USDKEY\", \"email\": \"[email protected]\"}}}" > /root/disconnected_pull.json mv USD{PULL_SECRET} /root/openshift_pull.json.old jq ".auths += {\"USDREGISTRY_NAME:5000\": {\"auth\": \"USDKEY\",\"email\": \"[email protected]\"}}" < /root/openshift_pull.json.old > USDPULL_SECRET mkdir -p /opt/registry/{auth,certs,data,conf} cat <<EOF > /opt/registry/conf/config.yml version: 0.1 log: fields: service: registry storage: cache: blobdescriptor: inmemory filesystem: rootdirectory: /var/lib/registry delete: enabled: true http: addr: :5000 headers: X-Content-Type-Options: [nosniff] health: storagedriver: enabled: true interval: 10s threshold: 3 compatibility: schema1: enabled: true EOF openssl req -newkey rsa:4096 -nodes -sha256 -keyout /opt/registry/certs/domain.key -x509 -days 3650 -out /opt/registry/certs/domain.crt -subj "/C=US/ST=Madrid/L=San Bernardo/O=Karmalabs/OU=Guitar/CN=USDREGISTRY_NAME" -addext "subjectAltName=DNS:USDREGISTRY_NAME" cp /opt/registry/certs/domain.crt /etc/pki/ca-trust/source/anchors/ update-ca-trust extract htpasswd -bBc /opt/registry/auth/htpasswd USDREGISTRY_USER USDREGISTRY_PASSWORD podman create --name registry --net host --security-opt label=disable --replace -v /opt/registry/data:/var/lib/registry:z -v /opt/registry/auth:/auth:z -v /opt/registry/conf/config.yml:/etc/docker/registry/config.yml -e "REGISTRY_AUTH=htpasswd" -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry" -e "REGISTRY_HTTP_SECRET=ALongRandomSecretForRegistry" -e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd -v /opt/registry/certs:/certs:z -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key docker.io/library/registry:latest [ "USD?" == "0" ] || !! systemctl enable --now registry 1 Replace the location of the PULL_SECRET with the appropriate location for your setup. Name the script file registry.sh and save it. When you run the script, it pulls in the following information: The registry name, based on the hypervisor hostname The necessary credentials and user access details Adjust permissions by adding the execution flag as follows: To run the script without any parameters, enter the following command: The script starts the server. The script uses a systemd service for management purposes. If you need to manage the script, you can use the following commands: The root folder for the registry is in the /opt/registry directory and contains the following subdirectories: certs contains the TLS certificates. auth contains the credentials. data contains the registry images. conf contains the registry configuration. 1.8.12.6.4. Setting up the management cluster for an IPv6 network To set up an OpenShift Container Platform management cluster, you can use dev-scripts, or if you are based on virtual machines, you can use the kcli tool. The following instructions are specific to the kcli tool. Ensure that the right networks are prepared for use in the hypervisor. The networks will host both the management and hosted clusters. Enter the following kcli command: where: -c specifies the CIDR for the network. -P dhcp=false configures the network to disable the DHCP, which is handled by the dnsmasq that you configured. -P dns=false configures the network to disable the DNS, which is also handled by the dnsmasq that you configured. --domain sets the domain to search. dns.base.domain.name is the DNS base domain name. ipv6 is the name of the network that you are creating. After the network is created, review the following output: Ensure that the pull secret and kcli plan files are in place so that you can deploy the OpenShift Container Platform management cluster: Confirm that the pull secret is in the same folder as the kcli plan, and that the pull secret file is named openshift_pull.json . Add the kcli plan, which contains the OpenShift Container Platform definition, in the mgmt-compact-hub-ipv6.yaml file. Ensure that you update the file contents to match your environment: plan: hub-ipv6 force: true version: nightly tag: "4.x.y-x86_64" cluster: "hub-ipv6" ipv6: true domain: dns.base.domain.name api_ip: 2620:52:0:1305::2 ingress_ip: 2620:52:0:1305::3 disconnected_url: registry.dns.base.domain.name:5000 disconnected_update: true disconnected_user: dummy disconnected_password: dummy disconnected_operators_version: v4.14 disconnected_operators: - name: metallb-operator - name: lvms-operator channels: - name: stable-4.14 disconnected_extra_images: - quay.io/user-name/trbsht:latest - quay.io/user-name/hypershift:BMSelfManage-v4.14-rc-v3 - registry.redhat.io/openshift4/ose-kube-rbac-proxy:v4.10 dualstack: false disk_size: 200 extra_disks: [200] memory: 48000 numcpus: 16 ctlplanes: 3 workers: 0 manifests: extra-manifests metal3: true network: ipv6 users_dev: developer users_devpassword: developer users_admin: admin users_adminpassword: admin metallb_pool: ipv6-virtual-network metallb_ranges: - 2620:52:0:1305::150-2620:52:0:1305::190 metallb_autoassign: true apps: - users - lvms-operator - metallb-operator vmrules: - hub-bootstrap: nets: - name: ipv6 mac: aa:aa:aa:aa:03:10 - hub-ctlplane-0: nets: - name: ipv6 mac: aa:aa:aa:aa:03:01 - hub-ctlplane-1: nets: - name: ipv6 mac: aa:aa:aa:aa:03:02 - hub-ctlplane-2: nets: - name: ipv6 mac: aa:aa:aa:aa:03:03 To provision the management cluster, enter the following command: 1.8.12.6.4.1. Additional resources For more information about the parameters in the kcli plan file, see Create a parameters.yml in the official kcli documentation. 1.8.12.6.5. Configuring the web server for an IPv6 network You need to configure an additional web server to host the Red Hat Enterprise Linux CoreOS (RHCOS) images that are associated with the OpenShift Container Platform release that you are deploying as a hosted cluster. To configure the web server, complete the following steps: Extract the openshift-install binary from the OpenShift Container Platform release that you want to use by entering the following command: oc adm -a USD{LOCAL_SECRET_JSON} release extract --command=openshift-install "USD{LOCAL_REGISTRY}/USD{LOCAL_REPOSITORY}:USD{OCP_RELEASE}-USD{ARCHITECTURE}" Run the following script. The script creates a folder in the /opt/srv directory. The folder contains the RHCOS images to provision the worker nodes. #!/bin/bash WEBSRV_FOLDER=/opt/srv ROOTFS_IMG_URL="USD(./openshift-install coreos print-stream-json | jq -r '.architectures.x86_64.artifacts.metal.formats.pxe.rootfs.location')" 1 LIVE_ISO_URL="USD(./openshift-install coreos print-stream-json | jq -r '.architectures.x86_64.artifacts.metal.formats.iso.disk.location')" 2 mkdir -p USD{WEBSRV_FOLDER}/images curl -Lk USD{ROOTFS_IMG_URL} -o USD{WEBSRV_FOLDER}/images/USD{ROOTFS_IMG_URL##*/} curl -Lk USD{LIVE_ISO_URL} -o USD{WEBSRV_FOLDER}/images/USD{LIVE_ISO_URL##*/} chmod -R 755 USD{WEBSRV_FOLDER}/* ## Run Webserver podman ps --noheading | grep -q websrv-ai if [[ USD? == 0 ]];then echo "Launching Registry pod..." /usr/bin/podman run --name websrv-ai --net host -v /opt/srv:/usr/local/apache2/htdocs:z quay.io/alosadag/httpd:p8080 fi 1 You can find the ROOTFS_IMG_URL value on the OpenShift CI Release page. 2 You can find the LIVE_ISO_URL value on the OpenShift CI Release page. After the download is completed, a container runs to host the images on a web server. The container uses a variation of the official HTTPd image, which also enables it to work with IPv6 networks. 1.8.12.6.6. Configuring image mirroring for an IPv6 network Image mirroring is the process of fetching images from external registries, such as registry.redhat.com or quay.io , and storing them in your private registry. 1.8.12.6.6.1. Completing the mirroring process Note: Start the mirroring process after the registry server is running. In the following procedures, the oc-mirror tool is used, which is a binary that uses the ImageSetConfiguration object. In the file, you can specify the following information: The OpenShift Container Platform versions to mirror. The versions are in quay.io . The additional Operators to mirror. Select packages individually. The extra images that you want to add to the repository. To configure image mirroring, complete the following steps: Ensure that your USD{HOME}/.docker/config.json file is updated with the registries that you are going to mirror from and with the private registry that you plan to push the images to. By using the following example, create an ImageSetConfiguration object to use for mirroring. Replace values as needed to match your environment: 1 Replace dns.base.domain.name with the DNS base domain name and 4.x.y with the supported OpenShift Container Platform version you want to use. Start the mirroring process by entering the following command: After the mirroring process is finished, you have a new folder named oc-mirror-workspace/results-XXXXXX/ , which contains the ICSP and the catalog sources to apply on the hosted cluster. Mirror the nightly or CI versions of OpenShift Container Platform by using the oc adm release mirror command. Enter the following command: Replace 4.x.y with the supported OpenShift Container Platform version you want to use. Mirror the latest multicluster engine operator images by following the steps in Install on disconnected networks . 1.8.12.6.6.2. Applying objects in the management cluster After the mirroring process is complete, you need to apply two objects in the management cluster: Image Content Source Policies (ICSP) or Image Digest Mirror Set (IDMS) Catalog sources When you use the oc-mirror tool, the output artifacts are in a folder named oc-mirror-workspace/results-XXXXXX/ . ICSP or IDMS initiates a MachineConfig change that does not restart your nodes but restarts the kubelet on each of them. After the nodes are marked as READY , you need to apply the newly generated catalog sources. The catalog sources initiate actions in the openshift-marketplace Operator, such as downloading the catalog image and processing it to retrieve all the PackageManifests that are included in that image. To check the new sources, run the following command by using the new CatalogSource as a source: To apply the artifacts, complete the following steps: Create the ICSP or IDMS artifacts by entering the following command: Wait for the nodes to become ready, and then enter the following command: 1.8.12.6.6.3. Additional resources If you are working in a virtual environment, after you configure mirroring, ensure that you meet the prerequisites for hosted control planes on OpenShift Virtualization . For more information about mirroring nightly or CI versions of OpenShift Container Platform, see Mirroring images for a disconnected installation using the oc-mirror plugin . 1.8.12.6.7. Deploying multicluster engine operator for an IPv6 network The multicluster engine operator plays a crucial role in deploying clusters across providers. If you already installed Red Hat Advanced Cluster Management, you do not need to install multicluster engine operator because it is automatically installed. If you do not have multicluster engine operator installed, review the following documentation to understand the prerequisites and steps to install it: About cluster lifecycle with multicluster engine operator Installing and upgrading multicluster engine operator 1.8.12.6.7.1. Deploying AgentServiceConfig resources The AgentServiceConfig custom resource is an essential component of the Assisted Service add-on that is part of multicluster engine operator. It is responsible for bare metal cluster deployment. When the add-on is enabled, you deploy the AgentServiceConfig resource to configure the add-on. In addition to configuring the AgentServiceConfig resource, you need to include additional config maps to ensure that multicluster engine operator functions properly in a disconnected environment. Configure the custom registries by adding the following config map, which contains the disconnected details to customize the deployment: --- apiVersion: v1 kind: ConfigMap metadata: name: custom-registries namespace: multicluster-engine labels: app: assisted-service data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- -----END CERTIFICATE----- registries.conf: | unqualified-search-registries = ["registry.access.redhat.com", "docker.io"] [[registry]] prefix = "" location = "registry.redhat.io/openshift4" mirror-by-digest-only = true [[registry.mirror]] location = "registry.dns.base.domain.name:5000/openshift4" 1 [[registry]] prefix = "" location = "registry.redhat.io/rhacm2" mirror-by-digest-only = true ... ... 1 Replace dns.base.domain.name with the DNS base domain name. The object contains two fields: Custom CAs: This field contains the Certificate Authorities (CAs) that are loaded into the various processes of the deployment. Registries: The Registries.conf field contains information about images and namespaces that need to be consumed from a mirror registry rather than the original source registry. Configure the Assisted Service by adding the AssistedServiceConfig object, as shown in the following example: --- apiVersion: agent-install.openshift.io/v1beta1 kind: AgentServiceConfig metadata: annotations: unsupported.agent-install.openshift.io/assisted-service-configmap: assisted-service-config 1 name: agent namespace: multicluster-engine spec: mirrorRegistryRef: name: custom-registries 2 databaseStorage: storageClassName: lvms-vg1 accessModes: - ReadWriteOnce resources: requests: storage: 10Gi filesystemStorage: storageClassName: lvms-vg1 accessModes: - ReadWriteOnce resources: requests: storage: 20Gi osImages: 3 - cpuArchitecture: x86_64 openshiftVersion: "4.14" rootFSUrl: http://registry.dns.base.domain.name:8080/images/rhcos-414.92.202308281054-0-live-rootfs.x86_64.img 4 url: http://registry.dns.base.domain.name:8080/images/rhcos-414.92.202308281054-0-live.x86_64.iso version: 414.92.202308281054-0 1 The metadata.annotations["unsupported.agent-install.openshift.io/assisted-service-configmap"] annotation references the config map name that the Operator consumes to customize behavior. 2 The spec.mirrorRegistryRef.name annotation points to the config map that contains disconnected registry information that the Assisted Service Operator consumes. This config map adds those resources during the deployment process. 3 The spec.osImages field contains different versions available for deployment by this Operator. This field is mandatory. This example assumes that you already downloaded the RootFS and LiveISO files. 4 In the rootFSUrl and url fields, replace dns.base.domain.name with the DNS base domain name. Deploy all of the objects by concatenating them into a single file and applying them to the management cluster. To do so, enter the following command: The command triggers two pods, as shown in this example output: 1 The assisted-image-service pod is responsible for creating the Red Hat Enterprise Linux CoreOS (RHCOS) boot image template, which is customized for each cluster that you deploy. 2 The assisted-service refers to the Operator. 1.8.12.6.8. Configuring TLS certificates for an IPv6 network Several TLS certificates are involved in the process to configure hosted control planes in a disconnected environment. To add a Certificate Authority (CA) to the management cluster, you need to modify the content of the following files in the OpenShift Container Platform control plane and worker nodes: /etc/pki/ca-trust/extracted/pem/ /etc/pki/ca-trust/source/anchors /etc/pki/tls/certs/ To add a CA to the management cluster, complete the following steps: Complete the steps in Updating the CA bundle in the official OpenShift Container Platform documentation. That method involves using the image-registry-operator , which deploys the CAs to the OpenShift Container Platform nodes. If that method does not apply to your situation, check whether the openshift-config namespace in the management cluster contains a config map named user-ca-bundle . If the namespace contains that config map, enter the following command: If the namespace does not contain that config map, enter the following command: 1.8.12.6.9. Deploying the hosted cluster for an IPv6 network A hosted cluster is an OpenShift Container Platform cluster with its control plane and API endpoint hosted on a management cluster. The hosted cluster includes the control plane and its corresponding data plane. Although you can use the console in Red Hat Advanced Cluster Management to create a hosted cluster, the following procedures use manifests, which provide more flexibility for modifying the related artifacts. 1.8.12.6.9.1. Deploying hosted cluster objects For the purposes of this procedure, the following values are used: HostedCluster name: hosted-ipv6 HostedCluster namespace: clusters Disconnected: true Network stack: IPv6 Typically, the HyperShift Operator creates the HostedControlPlane namespace. However, in this case, you want to include all the objects before the HyperShift Operator begins to reconcile the HostedCluster object. Then, when the Operator starts the reconciliation process, it can find all of the objects in place. Create a YAML file with the following information about the namespaces: --- apiVersion: v1 kind: Namespace metadata: creationTimestamp: null name: clusters-hosted-ipv6 spec: {} status: {} --- apiVersion: v1 kind: Namespace metadata: creationTimestamp: null name: clusters spec: {} status: {} Create a YAML file with the following information about the config maps and secrets to include in the HostedCluster deployment: --- apiVersion: v1 data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- -----END CERTIFICATE----- kind: ConfigMap metadata: name: user-ca-bundle namespace: clusters --- apiVersion: v1 data: .dockerconfigjson: xxxxxxxxx kind: Secret metadata: creationTimestamp: null name: hosted-ipv6-pull-secret namespace: clusters --- apiVersion: v1 kind: Secret metadata: name: sshkey-cluster-hosted-ipv6 namespace: clusters stringData: id_rsa.pub: ssh-rsa xxxxxxxxx --- apiVersion: v1 data: key: nTPtVBEt03owkrKhIdmSW8jrWRxU57KO/fnZa8oaG0Y= kind: Secret metadata: creationTimestamp: null name: hosted-ipv6-etcd-encryption-key namespace: clusters type: Opaque Create a YAML file that contains the RBAC roles so that Assisted Service agents can be in the same HostedControlPlane namespace as the hosted control plane and still be managed by the cluster API: apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: creationTimestamp: null name: capi-provider-role namespace: clusters-hosted-ipv6 rules: - apiGroups: - agent-install.openshift.io resources: - agents verbs: - '*' Create a YAML file with information about the HostedCluster object, replacing values as necessary: apiVersion: hypershift.openshift.io/v1beta1 kind: HostedCluster metadata: name: hosted-ipv6 namespace: clusters annotations: hypershift.openshift.io/control-plane-operator-image: registry.ocp-edge-cluster-0.qe.lab.redhat.com:5005/openshift/release@sha256:31149e3e5f8c5e5b5b100ff2d89975cf5f7a73801b2c06c639bf6648766117f8 spec: additionalTrustBundle: name: "user-ca-bundle" olmCatalogPlacement: guest imageContentSources: 1 - source: quay.io/openshift-release-dev/ocp-v4.0-art-dev mirrors: - registry.dns.base.domain.name:5000/openshift/release - source: quay.io/openshift-release-dev/ocp-release mirrors: - registry.dns.base.domain.name:5000/openshift/release-images - mirrors: ... ... autoscaling: {} controllerAvailabilityPolicy: SingleReplica dns: baseDomain: dns.base.domain.name etcd: managed: storage: persistentVolume: size: 8Gi restoreSnapshotURL: null type: PersistentVolume managementType: Managed fips: false networking: clusterNetwork: - cidr: 10.132.0.0/14 networkType: OVNKubernetes serviceNetwork: - cidr: 172.31.0.0/16 platform: agent: agentNamespace: clusters-hosted-ipv6 type: Agent pullSecret: name: hosted-ipv6-pull-secret release: image: registry.dns.base.domain.name:5000/openshift/release-images:4.x.y-x86_64 secretEncryption: aescbc: activeKey: name: hosted-ipv6-etcd-encryption-key type: aescbc services: - service: APIServer servicePublishingStrategy: nodePort: address: api.hosted-ipv6.dns.base.domain.name type: NodePort - service: OAuthServer servicePublishingStrategy: nodePort: address: api.hosted-ipv6.dns.base.domain.name type: NodePort - service: OIDC servicePublishingStrategy: nodePort: address: api.hosted-ipv6.dns.base.domain.name type: NodePort - service: Konnectivity servicePublishingStrategy: nodePort: address: api.hosted-ipv6.dns.base.domain.name type: NodePort - service: Ignition servicePublishingStrategy: nodePort: address: api.hosted-ipv6.dns.base.domain.name type: NodePort sshKey: name: sshkey-cluster-hosted-ipv6 status: controlPlaneEndpoint: host: "" port: 0 where dns.base.domain.name is the DNS base domain name and 4.x.y is the supported OpenShift Container Platform version you want to use. 1 The imageContentSources section contains mirror references for user workloads within the hosted cluster. Add an annotation in the HostedCluster object that points to the HyperShift Operator release in the OpenShift Container Platform release: Obtain the image payload by entering the following command: where dns.base.domain.name is the DNS base domain name and 4.x.y is the supported OpenShift Container Platform version you want to use. See the following output: By using the OpenShift Container Platform Images namespace, check the digest by entering the following command: where dns.base.domain.name is the DNS base domain name. See the following output: Note: The release image that is set in the HostedCluster object must use the digest rather than the tag; for example, quay.io/openshift-release-dev/ocp-release@sha256:e3ba11bd1e5e8ea5a0b36a75791c90f29afb0fdbe4125be4e48f69c76a5c47a0 . Create all of the objects that you defined in the YAML files by concatenating them into a file and applying them against the management cluster. To do so, enter the following command: See the output for the hosted control plane: See the output for the hosted cluster: , create a NodePool object. 1.8.12.6.9.2. Creating a NodePool object for the hosted cluster A NodePool is a scalable set of worker nodes that is associated with a hosted cluster. NodePool machine architectures remain consistent within a specific pool and are independent of the machine architecture of the control plane. Create a YAML file with the following information about the NodePool object, replacing values as necessary: apiVersion: hypershift.openshift.io/v1beta1 kind: NodePool metadata: creationTimestamp: null name: hosted-ipv6 namespace: clusters spec: arch: amd64 clusterName: hosted-ipv6 management: autoRepair: false 1 upgradeType: InPlace 2 nodeDrainTimeout: 0s platform: type: Agent release: image: registry.dns.base.domain.name:5000/openshift/release-images:4.x.y-x86_64 3 replicas: 0 status: replicas: 0 4 1 The autoRepair field is set to false because the node will not be re-created if it is removed. 2 The upgradeType is set to InPlace , which indicates that the same bare metal node is reused during an upgrade. 3 All of the nodes included in this NodePool are based on the following OpenShift Container Platform version: 4.x.y-x86_64 . Replace dns.base.domain.name with the DNS base domain name and 4.x.y with the supported OpenShift Container Platform version you want to use. 4 The replicas value is set to 0 so that you can scale them when needed. It is important to keep the NodePool replicas at 0 until all steps are completed. Create the NodePool object by entering the following command: See the output: , create an InfraEnv resource. 1.8.12.6.9.3. Creating an InfraEnv resource for the hosted cluster The InfraEnv resource is an Assisted Service object that includes essential details, such as the pullSecretRef and the sshAuthorizedKey . Those details are used to create the Red Hat Enterprise Linux CoreOS (RHCOS) boot image that is customized for the hosted cluster. Create a YAML file with the following information about the InfraEnv resource, replacing values as necessary: --- apiVersion: agent-install.openshift.io/v1beta1 kind: InfraEnv metadata: name: hosted-ipv6 namespace: clusters-hosted-ipv6 spec: pullSecretRef: 1 name: pull-secret sshAuthorizedKey: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDk7ICaUE+/k4zTpxLk4+xFdHi4ZuDi5qjeF52afsNkw0w/glILHhwpL5gnp5WkRuL8GwJuZ1VqLC9EKrdmegn4MrmUlq7WTsP0VFOZFBfq2XRUxo1wrRdor2z0Bbh93ytR+ZsDbbLlGngXaMa0Vbt+z74FqlcajbHTZ6zBmTpBVq5RHtDPgKITdpE1fongp7+ZXQNBlkaavaqv8bnyrP4BWahLP4iO9/xJF9lQYboYwEEDzmnKLMW1VtCE6nJzEgWCufACTbxpNS7GvKtoHT/OVzw8ArEXhZXQUS1UY8zKsX2iXwmyhw5Sj6YboA8WICs4z+TrFP89LmxXY0j6536TQFyRz1iB4WWvCbH5n6W+ABV2e8ssJB1AmEy8QYNwpJQJNpSxzoKBjI73XxvPYYC/IjPFMySwZqrSZCkJYqQ023ySkaQxWZT7in4KeMu7eS2tC+Kn4deJ7KwwUycx8n6RHMeD8Qg9flTHCv3gmab8JKZJqN3hW1D378JuvmIX4V0= 2 1 The pullSecretRef refers to the config map reference in the same namespace as the InfraEnv , where the pull secret is used. 2 The sshAuthorizedKey represents the SSH public key that is placed in the boot image. The SSH key allows access to the worker nodes as the core user. Create the InfraEnv resource by entering the following command: See the following output: , create worker nodes. 1.8.12.6.9.4. Creating worker nodes for the hosted cluster If you are working on a bare metal platform, creating worker nodes is crucial to ensure that the details in the BareMetalHost are correctly configured. If you are working with virtual machines, you can complete the following steps to create empty worker nodes that the Metal3 Operator consumes. To do so, you use the kcli tool. If this is not your first attempt to create worker nodes, you must first delete your setup. To do so, delete the plan by entering the following command: When you are prompted to confirm whether you want to delete the plan, type y . Confirm that you see a message stating that the plan was deleted. Create the virtual machines by entering the following commands: where: start=False means that the virtual machine (VM) will not automatically start upon creation. uefi_legacy=true means that you will use UEFI legacy boot to ensure compatibility with UEFI implementations. plan=hosted-dual indicates the plan name, which identifies a group of machines as a cluster. memory=8192 and numcpus=16 are parameters that specify the resources for the VM, including the RAM and CPU. disks=[200,200] indicates that you are creating two thin-provisioned disks in the VM. nets=[{"name": "dual", "mac": "aa:aa:aa:aa:02:13"}] are network details, including the network name to connect to and the MAC address of the primary interface. restart ksushy restarts the ksushy tool to ensure that the tool detects the VMs that you added. See the resulting output: , create bare metal hosts for the hosted cluster. 1.8.12.6.9.5. Creating bare metal hosts for the hosted cluster A bare metal host is an openshift-machine-api object that encompasses physical and logical details so that it can be identified by a Metal3 Operator. Those details are associated with other Assisted Service objects, known as agents . Important: Before you create the bare metal host and destination nodes, you must create the virtual machines. To create a bare metal host, complete the following steps: Create a YAML file with the following information: Note: Because you have at least one secret that holds the bare metal host credentials, you need to create at least two objects for each worker node. --- apiVersion: v1 kind: Secret metadata: name: hosted-ipv6-worker0-bmc-secret namespace: clusters-hosted-ipv6 data: password: YWRtaW4= username: YWRtaW4= type: Opaque --- apiVersion: metal3.io/v1alpha1 kind: BareMetalHost metadata: name: hosted-ipv6-worker0 namespace: clusters-hosted-ipv6 labels: infraenvs.agent-install.openshift.io: hosted-ipv6 1 annotations: inspect.metal3.io: disabled bmac.agent-install.openshift.io/hostname: hosted-ipv6-worker0 2 spec: automatedCleaningMode: disabled 3 bmc: disableCertificateVerification: true 4 address: redfish-virtualmedia://[192.168.125.1]:9000/redfish/v1/Systems/local/hosted-ipv6-worker0 5 credentialsName: hosted-ipv6-worker0-bmc-secret 6 bootMACAddress: aa:aa:aa:aa:03:11 7 online: true 8 1 infraenvs.agent-install.openshift.io serves as the link between the Assisted Installer and the BareMetalHost objects. 2 bmac.agent-install.openshift.io/hostname represents the node name that is adopted during deployment. 3 automatedCleaningMode prevents the node from being erased by the Metal3 Operator. 4 disableCertificateVerification is set to true to bypass certificate validation from the client. 5 address denotes the baseboard management controller (BMC) address of the worker node. 6 credentialsName points to the secret where the user and password credentials are stored. 7 bootMACAddress indicates the interface MAC address that the node starts from. 8 online defines the state of the node after the BareMetalHost object is created. Deploy the BareMetalHost object by entering the following command: During the process, you can view the following output: This output indicates that the process is trying to reach the nodes: This output indicates that the nodes are starting: This output indicates that the nodes started successfully: After the nodes start, notice the agents in the namespace, as shown in this example: The agents represent nodes that are available for installation. To assign the nodes to a hosted cluster, scale up the node pool. 1.8.12.6.9.6. Scaling up the node pool After you create the bare metal hosts, their statuses change from Registering to Provisioning to Provisioned . The nodes start with the LiveISO of the agent and a default pod that is named agent . That agent is responsible for receiving instructions from the Assisted Service Operator to install the OpenShift Container Platform payload. To scale up the node pool, enter the following command: After the scaling process is complete, notice that the agents are assigned to a hosted cluster: Also notice that the node pool replicas are set: Wait until the nodes join the cluster. During the process, the agents provide updates on their stage and status. , monitor the deployment of the hosted cluster. 1.8.12.6.10. Finishing the hosted cluster deployment for an IPv6 network You can monitor the deployment of a hosted cluster from two perspectives: the control plane and the data plane. 1.8.12.6.10.1. Monitoring the control plane While the hosted cluster is deploying, you can enter the following commands to monitor the control plane: Those commands provide information about the following artifacts: The HyperShift Operator The HostedControlPlane pod The bare metal hosts The agents The InfraEnv resource The HostedCluster and NodePool resources 1.8.12.6.10.2. Monitoring the data plane To monitor how the Operators are progressing during the deployment process, enter the following commands: Those commands provide information about the following artifacts: The cluster version The nodes, specifically, about whether the nodes joined the cluster The cluster Operators 1.8.12.7. Configuring hosted control planes on a dual stack network (Technology Preview) The dual-stack network configuration is currently designated as disconnected. The primary reason for this designation is because remote registries do not function with IPv6. The process to configure hosted control planes on a dual-stack network involves the following steps, which are described in detail in the sections that follow: Configure the hypervisor for a dual-stack network Configuring DNS for a dual-stack network Deploying a registry for a dual-stack network Setting up a management cluster for a dual-stack network Configuring the web server for a dual-stack network Configuring image mirroring for a dual-stack network Deploying multicluster engine operator for a dual-stack network Configuring TLS certificates for a dual-stack network Deploying the hosted cluster for a dual-stack network Finishing the deployment for a dual-stack network 1.8.12.7.1. Configuring the hypervisor for a dual stack network The following information applies to virtual machine environments only. 1.8.12.7.1.1. Accessing and deploying packages for a virtual OpenShift Container Platform cluster To deploy a virtual OpenShift Container Platform management cluster, access the required packages by entering the following command: sudo dnf install dnsmasq radvd vim golang podman bind-utils net-tools httpd-tools tree htop strace tmux -y Enable and start the Podman service by entering the following command: systemctl enable --now podman To use kcli to deploy the OpenShift Container Platform management cluster and other virtual components, install and configure the hypervisor by entering the following commands: sudo yum -y install libvirt libvirt-daemon-driver-qemu qemu-kvm sudo usermod -aG qemu,libvirt USD(id -un) sudo newgrp libvirt sudo systemctl enable --now libvirtd sudo dnf -y copr enable karmab/kcli sudo dnf -y install kcli sudo kcli create pool -p /var/lib/libvirt/images default kcli create host kvm -H 127.0.0.1 local sudo setfacl -m u:USD(id -un):rwx /var/lib/libvirt/images kcli create network -c 192.168.122.0/24 default 1.8.12.7.1.2. Enabling the network manager dispatcher Enable the network manager dispatcher to ensure that virtual machines can resolve the required domains, routes, and registries. To enable the network manager dispatcher, in the /etc/NetworkManager/dispatcher.d/ directory, create a script named forcedns that contains the following content, replacing values as necessary to match your environment: #!/bin/bash export IP="192.168.126.1" 1 export BASE_RESOLV_CONF="/run/NetworkManager/resolv.conf" if ! [[ `grep -q "USDIP" /etc/resolv.conf` ]]; then export TMP_FILE=USD(mktemp /etc/forcedns_resolv.conf.XXXXXX) cp USDBASE_RESOLV_CONF USDTMP_FILE chmod --reference=USDBASE_RESOLV_CONF USDTMP_FILE sed -i -e "s/dns.base.domain.name//" -e "s/search /& dns.base.domain.name /" -e "0,/nameserver/s/nameserver/& USDIP\n&/" USDTMP_FILE 2 mv USDTMP_FILE /etc/resolv.conf fi echo "ok" 1 Modify the IP variable to point to the IP address of the hypervisor interface that hosts the OpenShift Container Platform management cluster. 2 Replace dns.base.domain.name with the DNS base domain name. After you create the file, add permissions by entering the following command: chmod 755 /etc/NetworkManager/dispatcher.d/forcedns Run the script and verify that the output returns ok . 1.8.12.7.1.3. Configure BMC access Configure ksushy to simulate baseboard management controllers (BMCs) for the virtual machines. Enter the following commands: sudo dnf install python3-pyOpenSSL.noarch python3-cherrypy -y kcli create sushy-service --ssl --ipv6 --port 9000 sudo systemctl daemon-reload systemctl enable --now ksushy Test whether the service is correctly functioning by entering the following command: systemctl status ksushy 1.8.12.7.1.4. Configuring the hypervisor system to allow connections If you are working in a development environment, configure the hypervisor system to allow various types of connections through different virtual networks within the environment. Note: If you are working in a production environment, you must establish proper rules for the firewalld service and configure SELinux policies to maintain a secure environment. For SELinux, enter the following command: sed -i s/^SELINUX=.*USD/SELINUX=permissive/ /etc/selinux/config; setenforce 0 For firewalld , enter the following command: systemctl disable --now firewalld For libvirtd , enter the following commands: systemctl restart libvirtd systemctl enable --now libvirtd , configure DNS for your environment. 1.8.12.7.1.5. Additional resources For more information about kcli , see the official kcli documentation . 1.8.12.7.2. Configuring DNS for a dual stack network This step is mandatory for both disconnected and connected environments in both virtual and bare metal environments. The key distinction between virtual and bare metal environment lies in the location where you configure the resources. In a non-virtual environment, use a solution like Bind rather than a lightweight solution like dnsmasq . To configure DNS for a dual stack network on a virtual environment, see Default ingress and DNS behavior . To configure DNS for a dual stack network on bare metal, see Configuring DNS on bare metal . , deploy a registry. 1.8.12.7.3. Deploying a registry for a dual stack network For development environments, deploy a small, self-hosted registry by using a Podman container. For production environments, deploy an enterprise-hosted registry, such as Red Hat Quay, Nexus, or Artifactory. To deploy a small registry by using Podman, complete the following steps: As a privileged user, access the USD{HOME} directory and create the following script: #!/usr/bin/env bash set -euo pipefail PRIMARY_NIC=USD(ls -1 /sys/class/net | grep -v podman | head -1) export PATH=/root/bin:USDPATH export PULL_SECRET="/root/baremetal/hub/openshift_pull.json" 1 if [[ ! -f USDPULL_SECRET ]];then echo "Pull Secret not found, exiting..." exit 1 fi dnf -y install podman httpd httpd-tools jq skopeo libseccomp-devel export IP=USD(ip -o addr show USDPRIMARY_NIC | head -1 | awk '{print USD4}' | cut -d'/' -f1) REGISTRY_NAME=registry.USD(hostname --long) REGISTRY_USER=dummy REGISTRY_PASSWORD=dummy KEY=USD(echo -n USDREGISTRY_USER:USDREGISTRY_PASSWORD | base64) echo "{\"auths\": {\"USDREGISTRY_NAME:5000\": {\"auth\": \"USDKEY\", \"email\": \"[email protected]\"}}}" > /root/disconnected_pull.json mv USD{PULL_SECRET} /root/openshift_pull.json.old jq ".auths += {\"USDREGISTRY_NAME:5000\": {\"auth\": \"USDKEY\",\"email\": \"[email protected]\"}}" < /root/openshift_pull.json.old > USDPULL_SECRET mkdir -p /opt/registry/{auth,certs,data,conf} cat <<EOF > /opt/registry/conf/config.yml version: 0.1 log: fields: service: registry storage: cache: blobdescriptor: inmemory filesystem: rootdirectory: /var/lib/registry delete: enabled: true http: addr: :5000 headers: X-Content-Type-Options: [nosniff] health: storagedriver: enabled: true interval: 10s threshold: 3 compatibility: schema1: enabled: true EOF openssl req -newkey rsa:4096 -nodes -sha256 -keyout /opt/registry/certs/domain.key -x509 -days 3650 -out /opt/registry/certs/domain.crt -subj "/C=US/ST=Madrid/L=San Bernardo/O=Karmalabs/OU=Guitar/CN=USDREGISTRY_NAME" -addext "subjectAltName=DNS:USDREGISTRY_NAME" cp /opt/registry/certs/domain.crt /etc/pki/ca-trust/source/anchors/ update-ca-trust extract htpasswd -bBc /opt/registry/auth/htpasswd USDREGISTRY_USER USDREGISTRY_PASSWORD podman create --name registry --net host --security-opt label=disable --replace -v /opt/registry/data:/var/lib/registry:z -v /opt/registry/auth:/auth:z -v /opt/registry/conf/config.yml:/etc/docker/registry/config.yml -e "REGISTRY_AUTH=htpasswd" -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry" -e "REGISTRY_HTTP_SECRET=ALongRandomSecretForRegistry" -e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd -v /opt/registry/certs:/certs:z -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key docker.io/library/registry:latest [ "USD?" == "0" ] || !! systemctl enable --now registry 1 Replace the location of the PULL_SECRET with the appropriate location for your setup. Name the script file registry.sh and save it. When you run the script, it pulls in the following information: The registry name, based on the hypervisor hostname The necessary credentials and user access details Adjust permissions by adding the execution flag as follows: To run the script without any parameters, enter the following command: The script starts the server. The script uses a systemd service for management purposes. If you need to manage the script, you can use the following commands: The root folder for the registry is in the /opt/registry directory and contains the following subdirectories: certs contains the TLS certificates. auth contains the credentials. data contains the registry images. conf contains the registry configuration. 1.8.12.7.4. Setting up the management cluster for a dual stack network To set up an OpenShift Container Platform management cluster, you can use dev-scripts, or if you are based on virtual machines, you can use the kcli tool. The following instructions are specific to the kcli tool. Ensure that the right networks are prepared for use in the hypervisor. The networks will host both the management and hosted clusters. Enter the following kcli command: where: -c specifies the CIDR for the network. -P dhcp=false configures the network to disable the DHCP, which is handled by the dnsmasq that you configured. -P dns=false configures the network to disable the DNS, which is also handled by the dnsmasq that you configured. --domain sets the domain to search. dns.base.domain.name is the DNS base domain name. dual is the name of the network that you are creating. After the network is created, review the following output: Ensure that the pull secret and kcli plan files are in place so that you can deploy the OpenShift Container Platform management cluster: Confirm that the pull secret is in the same folder as the kcli plan, and that the pull secret file is named openshift_pull.json . Add the kcli plan, which contains the OpenShift Container Platform definition, in the mgmt-compact-hub-dual.yaml file. Ensure that you update the file contents to match your environment: plan: hub-dual force: true version: stable tag: "4.x.y-x86_64" 1 cluster: "hub-dual" dualstack: true domain: dns.base.domain.name api_ip: 192.168.126.10 ingress_ip: 192.168.126.11 service_networks: - 172.30.0.0/16 - fd02::/112 cluster_networks: - 10.132.0.0/14 - fd01::/48 disconnected_url: registry.dns.base.domain.name:5000 disconnected_update: true disconnected_user: dummy disconnected_password: dummy disconnected_operators_version: v4.14 disconnected_operators: - name: metallb-operator - name: lvms-operator channels: - name: stable-4.14 disconnected_extra_images: - quay.io/user-name/trbsht:latest - quay.io/user-name/hypershift:BMSelfManage-v4.14-rc-v3 - registry.redhat.io/openshift4/ose-kube-rbac-proxy:v4.10 dualstack: true disk_size: 200 extra_disks: [200] memory: 48000 numcpus: 16 ctlplanes: 3 workers: 0 manifests: extra-manifests metal3: true network: dual users_dev: developer users_devpassword: developer users_admin: admin users_adminpassword: admin metallb_pool: dual-virtual-network metallb_ranges: - 192.168.126.150-192.168.126.190 metallb_autoassign: true apps: - users - lvms-operator - metallb-operator vmrules: - hub-bootstrap: nets: - name: ipv6 mac: aa:aa:aa:aa:10:07 - hub-ctlplane-0: nets: - name: ipv6 mac: aa:aa:aa:aa:10:01 - hub-ctlplane-1: nets: - name: ipv6 mac: aa:aa:aa:aa:10:02 - hub-ctlplane-2: nets: - name: ipv6 mac: aa:aa:aa:aa:10:03 1 Replace 4.x.y with the supported OpenShift Container Platform version you want to use. To provision the management cluster, enter the following command: , configure the web server. 1.8.12.7.4.1. Additional resources For more information about the parameters in the kcli plan file, see Create a parameters.yml in the official kcli documentation. 1.8.12.7.5. Configuring the web server for a dual stack network You need to configure an additional web server to host the Red Hat Enterprise Linux CoreOS (RHCOS) images that are associated with the OpenShift Container Platform release that you are deploying as a hosted cluster. To configure the web server, complete the following steps: Extract the openshift-install binary from the OpenShift Container Platform release that you want to use by entering the following command: oc adm -a USD{LOCAL_SECRET_JSON} release extract --command=openshift-install "USD{LOCAL_REGISTRY}/USD{LOCAL_REPOSITORY}:USD{OCP_RELEASE}-USD{ARCHITECTURE}" Run the following script. The script creates a folder in the /opt/srv directory. The folder contains the RHCOS images to provision the worker nodes. #!/bin/bash WEBSRV_FOLDER=/opt/srv ROOTFS_IMG_URL="USD(./openshift-install coreos print-stream-json | jq -r '.architectures.x86_64.artifacts.metal.formats.pxe.rootfs.location')" 1 LIVE_ISO_URL="USD(./openshift-install coreos print-stream-json | jq -r '.architectures.x86_64.artifacts.metal.formats.iso.disk.location')" 2 mkdir -p USD{WEBSRV_FOLDER}/images curl -Lk USD{ROOTFS_IMG_URL} -o USD{WEBSRV_FOLDER}/images/USD{ROOTFS_IMG_URL##*/} curl -Lk USD{LIVE_ISO_URL} -o USD{WEBSRV_FOLDER}/images/USD{LIVE_ISO_URL##*/} chmod -R 755 USD{WEBSRV_FOLDER}/* ## Run Webserver podman ps --noheading | grep -q websrv-ai if [[ USD? == 0 ]];then echo "Launching Registry pod..." /usr/bin/podman run --name websrv-ai --net host -v /opt/srv:/usr/local/apache2/htdocs:z quay.io/alosadag/httpd:p8080 fi 1 You can find the ROOTFS_IMG_URL value on the OpenShift CI Release page. 2 You can find the LIVE_ISO_URL value on the OpenShift CI Release page. After the download is completed, a container runs to host the images on a web server. The container uses a variation of the official HTTPd image, which also enables it to work with IPv6 networks. 1.8.12.7.6. Configuring image mirroring for a dual stack network Image mirroring is the process of fetching images from external registries, such as registry.redhat.com or quay.io , and storing them in your private registry. 1.8.12.7.6.1. Completing the mirroring process Note: Start the mirroring process after the registry server is running. In the following procedures, the oc-mirror tool is used, which is a binary that uses the ImageSetConfiguration object. In the file, you can specify the following information: The OpenShift Container Platform versions to mirror. The versions are in quay.io . The additional Operators to mirror. Select packages individually. The extra images that you want to add to the repository. To configure image mirroring, complete the following steps: Ensure that your USD{HOME}/.docker/config.json file is updated with the registries that you are going to mirror from and with the private registry that you plan to push the images to. By using the following example, create an ImageSetConfiguration object to use for mirroring. Replace values as needed to match your environment: 1 Replace 4.x.y with the supported OpenShift Container Platform version you want to use. Start the mirroring process by entering the following command: After the mirroring process is finished, you have a new folder named oc-mirror-workspace/results-XXXXXX/ , which contains the ICSP and the catalog sources to apply on the hosted cluster. Mirror the nightly or CI versions of OpenShift Container Platform by using the oc adm release mirror command. Enter the following command: Replace 4.x.y with the supported OpenShift Container Platform version you want to use. Mirror the latest multicluster engine operator images by following the steps in Install on disconnected networks . 1.8.12.7.6.2. Applying objects in the management cluster After the mirroring process is complete, you need to apply two objects in the management cluster: Image Content Source Policies (ICSP) or Image Digest Mirror Set (IDMS) Catalog sources When you use the oc-mirror tool, the output artifacts are in a folder named oc-mirror-workspace/results-XXXXXX/ . ICSP or IDMS initiates a MachineConfig change that does not restart your nodes but restarts the kubelet on each of them. After the nodes are marked as READY , you need to apply the newly generated catalog sources. The catalog sources initiate actions in the openshift-marketplace Operator, such as downloading the catalog image and processing it to retrieve all the PackageManifests that are included in that image. To check the new sources, run the following command by using the new CatalogSource as a source: To apply the artifacts, complete the following steps: Create the ICSP or IDMS artifacts by entering the following command: Wait for the nodes to become ready, and then enter the following command: , deploy multicluster engine operator. 1.8.12.7.6.3. Additional resources If you are working in a virtual environment, after you configure mirroring, ensure that you meet the prerequisites for hosted control planes on OpenShift Virtualization . For more information about mirroring nightly or CI versions of OpenShift Container Platform, see Mirroring images for a disconnected installation using the oc-mirror plugin . 1.8.12.7.7. Deploying multicluster engine operator for a dual stack network The multicluster engine operator plays a crucial role in deploying clusters across providers. If you already installed Red Hat Advanced Cluster Management, you do not need to install multicluster engine operator because it is automatically installed. If you do not have multicluster engine operator installed, review the following documentation to understand the prerequisites and steps to install it: About cluster lifecycle with multicluster engine operator Installing and upgrading multicluster engine operator 1.8.12.7.7.1. Deploying AgentServiceConfig resources The AgentServiceConfig custom resource is an essential component of the Assisted Service add-on that is part of multicluster engine operator. It is responsible for bare metal cluster deployment. When the add-on is enabled, you deploy the AgentServiceConfig resource to configure the add-on. In addition to configuring the AgentServiceConfig resource, you need to include additional config maps to ensure that multicluster engine operator functions properly in a disconnected environment. Configure the custom registries by adding the following config map, which contains the disconnected details to customize the deployment: --- apiVersion: v1 kind: ConfigMap metadata: name: custom-registries namespace: multicluster-engine labels: app: assisted-service data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- -----END CERTIFICATE----- registries.conf: | unqualified-search-registries = ["registry.access.redhat.com", "docker.io"] [[registry]] prefix = "" location = "registry.redhat.io/openshift4" mirror-by-digest-only = true [[registry.mirror]] location = "registry.dns.base.domain.name:5000/openshift4" 1 [[registry]] prefix = "" location = "registry.redhat.io/rhacm2" mirror-by-digest-only = true ... ... 1 Replace dns.base.domain.name with the DNS base domain name. The object contains two fields: Custom CAs: This field contains the Certificate Authorities (CAs) that are loaded into the various processes of the deployment. Registries: The Registries.conf field contains information about images and namespaces that need to be consumed from a mirror registry rather than the original source registry. Configure the Assisted Service by adding the AssistedServiceConfig object, as shown in the following example: --- apiVersion: agent-install.openshift.io/v1beta1 kind: AgentServiceConfig metadata: annotations: unsupported.agent-install.openshift.io/assisted-service-configmap: assisted-service-config 1 name: agent namespace: multicluster-engine spec: mirrorRegistryRef: name: custom-registries 2 databaseStorage: storageClassName: lvms-vg1 accessModes: - ReadWriteOnce resources: requests: storage: 10Gi filesystemStorage: storageClassName: lvms-vg1 accessModes: - ReadWriteOnce resources: requests: storage: 20Gi osImages: 3 - cpuArchitecture: x86_64 openshiftVersion: "4.14" rootFSUrl: http://registry.dns.base.domain.name:8080/images/rhcos-414.92.202308281054-0-live-rootfs.x86_64.img 4 url: http://registry.dns.base.domain.name:8080/images/rhcos-414.92.202308281054-0-live.x86_64.iso version: 414.92.202308281054-0 1 The metadata.annotations["unsupported.agent-install.openshift.io/assisted-service-configmap"] annotation references the config map name that the Operator consumes to customize behavior. 2 The spec.mirrorRegistryRef.name annotation points to the config map that contains disconnected registry information that the Assisted Service Operator consumes. This config map adds those resources during the deployment process. 3 The spec.osImages field contains different versions available for deployment by this Operator. This field is mandatory. This example assumes that you already downloaded the RootFS and LiveISO files. 4 In the rootFSUrl and url fields, replace dns.base.domain.name with the DNS base domain name. Deploy all of the objects by concatenating them into a single file and applying them to the management cluster. To do so, enter the following command: The command triggers two pods, as shown in this example output: 1 The assisted-image-service pod is responsible for creating the Red Hat Enterprise Linux CoreOS (RHCOS) boot image template, which is customized for each cluster that you deploy. 2 The assisted-service refers to the Operator. 1.8.12.7.8. Configuring TLS certificates for a dual stack network Several TLS certificates are involved in the process to configure hosted control planes in a disconnected environment. To add a Certificate Authority (CA) to the management cluster, you need to modify the content of the following files in the OpenShift Container Platform control plane and worker nodes: /etc/pki/ca-trust/extracted/pem/ /etc/pki/ca-trust/source/anchors /etc/pki/tls/certs/ To add a CA to the management cluster, complete the following steps: Complete the steps in Updating the CA bundle in the official OpenShift Container Platform documentation. That method involves using the image-registry-operator , which deploys the CAs to the OpenShift Container Platform nodes. If that method does not apply to your situation, check whether the openshift-config namespace in the management cluster contains a config map named user-ca-bundle . If the namespace contains that config map, enter the following command: If the namespace does not contain that config map, enter the following command: 1.8.12.7.9. Deploying the hosted cluster for a dual stack network A hosted cluster is an OpenShift Container Platform cluster with its control plane and API endpoint hosted on a management cluster. The hosted cluster includes the control plane and its corresponding data plane. Although you can use the console in Red Hat Advanced Cluster Management to create a hosted cluster, the following procedures use manifests, which provide more flexibility for modifying the related artifacts. 1.8.12.7.9.1. Deploying hosted cluster objects For the purposes of this procedure, the following values are used: HostedCluster name: hosted-dual HostedCluster namespace: clusters Disconnected: true Network stack: Dual Typically, the HyperShift Operator creates the HostedControlPlane namespace. However, in this case, you want to include all the objects before the HyperShift Operator begins to reconcile the HostedCluster object. Then, when the Operator starts the reconciliation process, it can find all of the objects in place. Create a YAML file with the following information about the namespaces: --- apiVersion: v1 kind: Namespace metadata: creationTimestamp: null name: clusters-hosted-dual spec: {} status: {} --- apiVersion: v1 kind: Namespace metadata: creationTimestamp: null name: clusters spec: {} status: {} Create a YAML file with the following information about the config maps and secrets to include in the HostedCluster deployment: --- apiVersion: v1 data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- -----END CERTIFICATE----- kind: ConfigMap metadata: name: user-ca-bundle namespace: clusters --- apiVersion: v1 data: .dockerconfigjson: xxxxxxxxx kind: Secret metadata: creationTimestamp: null name: hosted-dual-pull-secret namespace: clusters --- apiVersion: v1 kind: Secret metadata: name: sshkey-cluster-hosted-dual namespace: clusters stringData: id_rsa.pub: ssh-rsa xxxxxxxxx --- apiVersion: v1 data: key: nTPtVBEt03owkrKhIdmSW8jrWRxU57KO/fnZa8oaG0Y= kind: Secret metadata: creationTimestamp: null name: hosted-dual-etcd-encryption-key namespace: clusters type: Opaque Create a YAML file that contains the RBAC roles so that Assisted Service agents can be in the same HostedControlPlane namespace as the hosted control plane and still be managed by the cluster API: apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: creationTimestamp: null name: capi-provider-role namespace: clusters-hosted-dual rules: - apiGroups: - agent-install.openshift.io resources: - agents verbs: - '*' Create a YAML file with information about the HostedCluster object, replacing values as necessary: apiVersion: hypershift.openshift.io/v1beta1 kind: HostedCluster metadata: name: hosted-dual namespace: clusters spec: additionalTrustBundle: name: "user-ca-bundle" olmCatalogPlacement: guest imageContentSources: 1 - source: quay.io/openshift-release-dev/ocp-v4.0-art-dev mirrors: - registry.dns.base.domain.name:5000/openshift/release 2 - source: quay.io/openshift-release-dev/ocp-release mirrors: - registry.dns.base.domain.name:5000/openshift/release-images - mirrors: ... ... autoscaling: {} controllerAvailabilityPolicy: SingleReplica dns: baseDomain: dns.base.domain.name etcd: managed: storage: persistentVolume: size: 8Gi restoreSnapshotURL: null type: PersistentVolume managementType: Managed fips: false networking: clusterNetwork: - cidr: 10.132.0.0/14 - cidr: fd01::/48 networkType: OVNKubernetes serviceNetwork: - cidr: 172.31.0.0/16 - cidr: fd02::/112 platform: agent: agentNamespace: clusters-hosted-dual type: Agent pullSecret: name: hosted-dual-pull-secret release: image: registry.dns.base.domain.name:5000/openshift/release-images:4.x.y-x86_64 3 secretEncryption: aescbc: activeKey: name: hosted-dual-etcd-encryption-key type: aescbc services: - service: APIServer servicePublishingStrategy: nodePort: address: api.hosted-dual.dns.base.domain.name type: NodePort - service: OAuthServer servicePublishingStrategy: nodePort: address: api.hosted-dual.dns.base.domain.name type: NodePort - service: OIDC servicePublishingStrategy: nodePort: address: api.hosted-dual.dns.base.domain.name type: NodePort - service: Konnectivity servicePublishingStrategy: nodePort: address: api.hosted-dual.dns.base.domain.name type: NodePort - service: Ignition servicePublishingStrategy: nodePort: address: api.hosted-dual.dns.base.domain.name type: NodePort sshKey: name: sshkey-cluster-hosted-dual status: controlPlaneEndpoint: host: "" port: 0 1 The imageContentSources section contains mirror references for user workloads within the hosted cluster. 2 Throughout the YAML file, replace dns.base.domain.name with the DNS base domain name. 3 Replace 4.x.y with the supported OpenShift Container Platform version you want to use. Add an annotation in the HostedCluster object that points to the HyperShift Operator release in the OpenShift Container Platform release: Obtain the image payload by entering the following command: where dns.base.domain.name is the DNS base domain name and 4.x.y is the supported OpenShift Container Platform version you want to use. See the following output: By using the OpenShift Container Platform Images namespace, check the digest by entering the following command: where dns.base.domain.name is the DNS base domain name. See the following output: Note: The release image that is set in the HostedCluster object must use the digest rather than the tag; for example, quay.io/openshift-release-dev/ocp-release@sha256:e3ba11bd1e5e8ea5a0b36a75791c90f29afb0fdbe4125be4e48f69c76a5c47a0 . Create all of the objects that you defined in the YAML files by concatenating them into a file and applying them against the management cluster. To do so, enter the following command: See the output for the hosted control plane: See the output for the hosted cluster: , create a NodePool object. 1.8.12.7.9.2. Creating a NodePool object for the hosted cluster A NodePool is a scalable set of worker nodes that is associated with a hosted cluster. NodePool machine architectures remain consistent within a specific pool and are independent of the machine architecture of the control plane. Create a YAML file with the following information about the NodePool object, replacing values as necessary: apiVersion: hypershift.openshift.io/v1beta1 kind: NodePool metadata: creationTimestamp: null name: hosted-dual namespace: clusters spec: arch: amd64 clusterName: hosted-dual management: autoRepair: false 1 upgradeType: InPlace 2 nodeDrainTimeout: 0s platform: type: Agent release: image: registry.dns.base.domain.name:5000/openshift/release-images:4.x.y-x86_64 3 replicas: 0 status: replicas: 0 4 1 The autoRepair field is set to false because the node will not be re-created if it is removed. 2 The upgradeType is set to InPlace , which indicates that the same bare metal node is reused during an upgrade. 3 All of the nodes included in this NodePool are based on the following OpenShift Container Platform version: 4.x.y-x86_64 . Replace the dns.base.domain.name value with your DNS base domain name and the 4.x.y value with the supported OpenShift Container Platform version you want to use. 4 The replicas value is set to 0 so that you can scale them when needed. It is important to keep the NodePool replicas at 0 until all steps are completed. Create the NodePool object by entering the following command: See the output: , create an InfraEnv resource. 1.8.12.7.9.3. Creating an InfraEnv resource for the hosted cluster The InfraEnv resource is an Assisted Service object that includes essential details, such as the pullSecretRef and the sshAuthorizedKey . Those details are used to create the Red Hat Enterprise Linux CoreOS (RHCOS) boot image that is customized for the hosted cluster. Create a YAML file with the following information about the InfraEnv resource, replacing values as necessary: --- apiVersion: agent-install.openshift.io/v1beta1 kind: InfraEnv metadata: name: hosted-dual namespace: clusters-hosted-dual spec: pullSecretRef: 1 name: pull-secret sshAuthorizedKey: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDk7ICaUE+/k4zTpxLk4+xFdHi4ZuDi5qjeF52afsNkw0w/glILHhwpL5gnp5WkRuL8GwJuZ1VqLC9EKrdmegn4MrmUlq7WTsP0VFOZFBfq2XRUxo1wrRdor2z0Bbh93ytR+ZsDbbLlGngXaMa0Vbt+z74FqlcajbHTZ6zBmTpBVq5RHtDPgKITdpE1fongp7+ZXQNBlkaavaqv8bnyrP4BWahLP4iO9/xJF9lQYboYwEEDzmnKLMW1VtCE6nJzEgWCufACTbxpNS7GvKtoHT/OVzw8ArEXhZXQUS1UY8zKsX2iXwmyhw5Sj6YboA8WICs4z+TrFP89LmxXY0j6536TQFyRz1iB4WWvCbH5n6W+ABV2e8ssJB1AmEy8QYNwpJQJNpSxzoKBjI73XxvPYYC/IjPFMySwZqrSZCkJYqQ023ySkaQxWZT7in4KeMu7eS2tC+Kn4deJ7KwwUycx8n6RHMeD8Qg9flTHCv3gmab8JKZJqN3hW1D378JuvmIX4V0= 2 1 The pullSecretRef refers to the config map reference in the same namespace as the InfraEnv , where the pull secret is used. 2 The sshAuthorizedKey represents the SSH public key that is placed in the boot image. The SSH key allows access to the worker nodes as the core user. Create the InfraEnv resource by entering the following command: See the following output: , create worker nodes. 1.8.12.7.9.4. Creating worker nodes for the hosted cluster If you are working on a bare metal platform, creating worker nodes is crucial to ensure that the details in the BareMetalHost are correctly configured. If you are working with virtual machines, you can complete the following steps to create empty worker nodes for the Metal3 Operator to consume. To do so, you use the kcli tool. If this is not your first attempt to create worker nodes, you must first delete your setup. To do so, delete the plan by entering the following command: When you are prompted to confirm whether you want to delete the plan, type y . Confirm that you see a message stating that the plan was deleted. Create the virtual machines by entering the following commands: where: start=False means that the virtual machine (VM) will not automatically start upon creation. uefi_legacy=true means that you will use UEFI legacy boot to ensure compatibility with UEFI implementations. plan=hosted-dual indicates the plan name, which identifies a group of machines as a cluster. memory=8192 and numcpus=16 are parameters that specify the resources for the VM, including the RAM and CPU. disks=[200,200] indicates that you are creating two thin-provisioned disks in the VM. nets=[{"name": "dual", "mac": "aa:aa:aa:aa:02:13"}] are network details, including the network name to connect to and the MAC address of the primary interface. restart ksushy restarts the ksushy tool to ensure that the tool detects the VMs that you added. See the resulting output: , create bare metal hosts for the hosted cluster. 1.8.12.7.9.5. Creating bare metal hosts for the hosted cluster A bare metal host is an openshift-machine-api object that encompasses physical and logical details so that it can be identified by a Metal3 Operator. Those details are associated with other Assisted Service objects, known as agents . Important: Before you create the bare metal host and destination nodes, you must create the virtual machines. To create a bare metal host, complete the following steps: Create a YAML file with the following information: Note: Because you have at least one secret that holds the bare metal host credentials, you need to create at least two objects for each worker node. --- apiVersion: v1 kind: Secret metadata: name: hosted-dual-worker0-bmc-secret namespace: clusters-hosted-dual data: password: YWRtaW4= username: YWRtaW4= type: Opaque --- apiVersion: metal3.io/v1alpha1 kind: BareMetalHost metadata: name: hosted-dual-worker0 namespace: clusters-hosted-dual labels: infraenvs.agent-install.openshift.io: hosted-dual 1 annotations: inspect.metal3.io: disabled bmac.agent-install.openshift.io/hostname: hosted-dual-worker0 2 spec: automatedCleaningMode: disabled 3 bmc: disableCertificateVerification: true 4 address: redfish-virtualmedia://[192.168.126.1]:9000/redfish/v1/Systems/local/hosted-dual-worker0 5 credentialsName: hosted-dual-worker0-bmc-secret 6 bootMACAddress: aa:aa:aa:aa:02:11 7 online: true 8 1 infraenvs.agent-install.openshift.io serves as the link between the Assisted Installer and the BareMetalHost objects. 2 bmac.agent-install.openshift.io/hostname represents the node name that is adopted during deployment. 3 automatedCleaningMode prevents the node from being erased by the Metal3 Operator. 4 disableCertificateVerification is set to true to bypass certificate validation from the client. 5 address denotes the baseboard management controller (BMC) address of the worker node. 6 credentialsName points to the secret where the user and password credentials are stored. 7 bootMACAddress indicates the interface MAC address that the node starts from. 8 online defines the state of the node after the BareMetalHost object is created. Deploy the BareMetalHost object by entering the following command: During the process, you can view the following output: This output indicates that the process is trying to reach the nodes: This output indicates that the nodes are starting: This output indicates that the nodes started successfully: After the nodes start, notice the agents in the namespace, as shown in this example: The agents represent nodes that are available for installation. To assign the nodes to a hosted cluster, scale up the node pool. 1.8.12.7.9.6. Scaling up the node pool After you create the bare metal hosts, their statuses change from Registering to Provisioning to Provisioned . The nodes start with the LiveISO of the agent and a default pod that is named agent . That agent is responsible for receiving instructions from the Assisted Service Operator to install the OpenShift Container Platform payload. To scale up the node pool, enter the following command: After the scaling process is complete, notice that the agents are assigned to a hosted cluster: Also notice that the node pool replicas are set: Replace 4.x.y with the supported OpenShift Container Platform version that you want to use. Wait until the nodes join the cluster. During the process, the agents provide updates on their stage and status. , monitor the deployment of the hosted cluster. 1.8.12.7.10. Finishing the hosted cluster deployment for a dual stack network You can monitor the deployment of a hosted cluster from two perspectives: the control plane and the data plane. 1.8.12.7.10.1. Monitoring the control plane While the hosted cluster is deploying, you can enter the following commands to monitor the control plane: Those commands provide information about the following artifacts: The HyperShift Operator The HostedControlPlane pod The bare metal hosts The agents The InfraEnv resource The HostedCluster and NodePool resources 1.8.12.7.10.2. Monitoring the data plane To monitor how the Operators are progressing during the deployment process, enter the following commands: Those commands provide information about the following artifacts: The cluster version The nodes, specifically, about whether the nodes joined the cluster The cluster Operators 1.8.13. Manually importing a hosted control plane cluster Hosted clusters are automatically imported into multicluster engine operator after the hosted control plane becomes available. If you want to import hosted clusters manually, complete the following steps: In the console, click Infrastructure > Clusters and select the hosted cluster that you want to import. Click Import hosted cluster . Note: For your discovered hosted cluster, you can also import from the console, but the cluster must be in an upgradable state. Import on your cluster is disabled if the hosted cluster is not in an upgradable state because the hosted control plane is not available. Click Import to begin the process. The status is Importing while the cluster receives updates and then changes to Ready . 1.8.13.1. Manually importing a hosted control plane cluster on AWS You can also import a hosted control plane cluster on AWS with the command line interface by completing the following steps: Create your ManagedCluster resource by using the following sample YAML file: apiVersion: cluster.open-cluster-management.io/v1 kind: ManagedCluster metadata: annotations: import.open-cluster-management.io/hosting-cluster-name: local-cluster import.open-cluster-management.io/klusterlet-deploy-mode: Hosted open-cluster-management/created-via: hypershift labels: cloud: auto-detect cluster.open-cluster-management.io/clusterset: default name: <cluster_name> vendor: OpenShift name: <cluster_name> spec: hubAcceptsClient: true leaseDurationSeconds: 60 Replace <cluster_name> with the name of your hosted cluster. Run the following command to apply the resource: Replace <file_name> with the YAML file name you created in the step. Create your KlusterletAddonConfig resource by using the following sample YAML file. This only applies to Red Hat Advanced Cluster Management. If you have installed multicluster engine operator only, skip this step: apiVersion: agent.open-cluster-management.io/v1 kind: KlusterletAddonConfig metadata: name: <cluster_name> namespace: <cluster_name> spec: clusterName: <cluster_name> clusterNamespace: <cluster_name> clusterLabels: cloud: auto-detect vendor: auto-detect applicationManager: enabled: true certPolicyController: enabled: true policyController: enabled: true searchCollector: enabled: false Replace <cluster_name> with the name of your hosted cluster. Run the following command to apply the resource: Replace <file_name> with the YAML file name you created in the step. After the import process is complete, your hosted cluster becomes visible in the console. You can also check the status of your hosted cluster by running the following command: 1.8.13.2. Additional resources For instructions to disable the automatic import of hosted clusters, see Disabling the automatic import of hosted clusters into multicluster engine operator . 1.8.13.3. Disabling the automatic import of hosted clusters into multicluster engine operator Hosted clusters are automatically imported into multicluster engine operator after the control plane becomes available, you can disable the automatic import of hosted clusters. Any hosted clusters that were previously imported are not affected, even if you disable automatic import. When you upgrade to multicluster engine operator 2.5 and automatic import is enabled, all hosted clusters that are not imported are automatically imported if their control planes are available. Note: All Red Hat Advanced Cluster Management add-ons are also enabled if Red Hat Advanced Cluster Management is installed. When automatic import is disabled, only newly created hosted clusters are not automatically imported. Hosted clusters that were already imported are not affected. You can still manually import clusters by using the console or by creating the ManagedCluster and KlusterletAddonConfig custom resources. To disable the automatic import of hosted clusters, complete the following steps: On the hub cluster, open the hypershift-addon-deploy-config specification that is in the AddonDeploymentConfig resource in the namespace where multicluster engine operator is installed by entering the following command: In the spec.customizedVariables section, add the autoImportDisabled variable with value of "true" , as shown in the following example: apiVersion: addon.open-cluster-management.io/v1alpha1 kind: AddOnDeploymentConfig metadata: name: hypershift-addon-deploy-config namespace: multicluster-engine spec: customizedVariables: - name: hcMaxNumber value: "80" - name: hcThresholdNumber value: "60" - name: autoImportDisabled value: "true" To re-enable automatic import, set the value of the autoImportDisabled variable to "false" or remove the variable from the AddonDeploymentConfig resource. 1.8.13.3.1. Additional resources For instructions to manually import a hosted cluster, see Manually importing a hosted control plane cluster . 1.8.14. Enabling or disabling the hosted control plane feature The hosted control planes feature, as well as the hypershift-addon managed cluster add-on, are enabled by default. If you want to disable the feature, or if you disabled it and want to manually enable it, see the following procedures: Manually enabling the hosted control planes feature Disabling the hosted control planes feature 1.8.14.1. Manually enabling the hosted control planes feature You can run the following command to enable the feature: 1 The default MultiClusterEngine resource instance name is multiclusterengine , but you can get the MultiClusterEngine name from your cluster by running the following command: USD oc get mce . Run the following command to verify that the hypershift and hypershift-local-hosting features are enabled in the MultiClusterEngine custom resource: 1 The default MultiClusterEngine resource instance name is multiclusterengine , but you can get the MultiClusterEngine name from your cluster by running the following command: USD oc get mce . The output resembles the following example: apiVersion: multicluster.openshift.io/v1 kind: MultiClusterEngine metadata: name: multiclusterengine spec: overrides: components: - name: hypershift enabled: true - name: hypershift-local-hosting enabled: true 1.8.14.1.1. Manually enabling the hypershift-addon managed cluster add-on for local-cluster Enabling the hosted control planes feature automatically enables the hypershift-addon managed cluster add-on. If you need to enable the hypershift-addon managed cluster add-on manually, complete the following steps to use the hypershift-addon to install the HyperShift Operator on local-cluster : Create the ManagedClusterAddon HyperShift add-on by creating a file that resembles the following example: apiVersion: addon.open-cluster-management.io/v1alpha1 kind: ManagedClusterAddOn metadata: name: hypershift-addon namespace: local-cluster spec: installNamespace: open-cluster-management-agent-addon Apply the file by running the following command: Replace filename with the name of the file that you created. Confirm that the hypershift-addon is installed by running the following command: If the add-on is installed, the output resembles the following example: Your HyperShift add-on is installed and the hosting cluster is available to create and manage hosted clusters. 1.8.14.2. Disabling the hosted control planes feature You can uninstall the HyperShift Operator and disable the hosted control plane. When you disable the hosted control plane cluster feature, you must destroy the hosted cluster and the managed cluster resource on multicluster engine operator, as described in the Managing hosted control plane clusters topics. 1.8.14.2.1. Uninstalling the HyperShift Operator To uninstall the HyperShift Operator and disable the hypershift-addon from the local-cluster , complete the following steps: Run the following command to ensure that there is no hosted cluster running: Important: If a hosted cluster is running, the HyperShift Operator does not uninstall, even if the hypershift-addon is disabled. Disable the hypershift-addon by running the following command: 1 The default MultiClusterEngine resource instance name is multiclusterengine , but you can get the MultiClusterEngine name from your cluster by running the following command: USD oc get mce . Note: You can also disable the hypershift-addon for the local-cluster from the multicluster engine operator console after disabling the hypershift-addon . 1.8.14.2.2. Disabling the hosted control planes feature You must first uninstall the HyperShift Operator before disabling the hosted control planes feature. Run the following command to disable the hosted control planes feature: 1 The default MultiClusterEngine resource instance name is multiclusterengine , but you can get the MultiClusterEngine name from your cluster by running the following command: USD oc get mce . You can verify that the hypershift and hypershift-local-hosting features are disabled in the MultiClusterEngine custom resource by running the following command: 1 The default MultiClusterEngine resource instance name is multiclusterengine , but you can get the MultiClusterEngine name from your cluster by running the following command: USD oc get mce . See the following example where hypershift and hypershift-local-hosting have their enabled: flags set to false : apiVersion: multicluster.openshift.io/v1 kind: MultiClusterEngine metadata: name: multiclusterengine spec: overrides: components: - name: hypershift enabled: false - name: hypershift-local-hosting enabled: false 1.8.14.3. Additional resources Configuring hosted control plane clusters on AWS Configuring hosted control plane clusters on bare metal 1.9. APIs You can access the following APIs for cluster lifecycle management with the multicluster engine operator. User required access: You can only perform actions that your role is assigned. Note: You can also access all APIs from the integrated console. From the local-cluster view, navigate to Home > API Explorer to explore API groups. For more information, review the API documentation for each of the following resources: Clusters API ClusterSets API (v1beta2) Clusterview API ClusterSetBindings API (v1beta2) MultiClusterEngine API Placements API (v1beta1) PlacementDecisions API (v1beta1) ManagedServiceAccount API 1.9.1. Clusters API 1.9.1.1. Overview This documentation is for the cluster resource for multicluster engine for Kubernetes operator. Cluster resource has four possible requests: create, query, delete and update. 1.9.1.1.1. URI scheme BasePath : /kubernetes/apis Schemes : HTTPS 1.9.1.1.2. Tags cluster.open-cluster-management.io : Create and manage clusters 1.9.1.2. Paths 1.9.1.2.1. Query all clusters 1.9.1.2.1.1. Description Query your clusters for more details. 1.9.1.2.1.2. Parameters Type Name Description Schema Header COOKIE required Authorization: Bearer {ACCESS_TOKEN} ; ACCESS_TOKEN is the user access token. string 1.9.1.2.1.3. Responses HTTP Code Description Schema 200 Success No Content 403 Access forbidden No Content 404 Resource not found No Content 500 Internal service error No Content 503 Service unavailable No Content 1.9.1.2.1.4. Consumes cluster/yaml 1.9.1.2.1.5. Tags cluster.open-cluster-management.io 1.9.1.2.2. Create a cluster 1.9.1.2.2.1. Description Create a cluster 1.9.1.2.2.2. Parameters Type Name Description Schema Header COOKIE required Authorization: Bearer {ACCESS_TOKEN} ; ACCESS_TOKEN is the user access token. string Body body required Parameters describing the cluster to be created. Cluster 1.9.1.2.2.3. Responses HTTP Code Description Schema 200 Success No Content 403 Access forbidden No Content 404 Resource not found No Content 500 Internal service error No Content 503 Service unavailable No Content 1.9.1.2.2.4. Consumes cluster/yaml 1.9.1.2.2.5. Tags cluster.open-cluster-management.io 1.9.1.2.2.6. Example HTTP request 1.9.1.2.2.6.1. Request body { "apiVersion" : "cluster.open-cluster-management.io/v1", "kind" : "ManagedCluster", "metadata" : { "labels" : { "vendor" : "OpenShift" }, "name" : "cluster1" }, "spec": { "hubAcceptsClient": true, "managedClusterClientConfigs": [ { "caBundle": "test", "url": "https://test.com" } ] }, "status" : { } } 1.9.1.2.3. Query a single cluster 1.9.1.2.3.1. Description Query a single cluster for more details. 1.9.1.2.3.2. Parameters Type Name Description Schema Header COOKIE required Authorization: Bearer {ACCESS_TOKEN} ; ACCESS_TOKEN is the user access token. string Path cluster_name required Name of the cluster that you want to query. string 1.9.1.2.3.3. Responses HTTP Code Description Schema 200 Success No Content 403 Access forbidden No Content 404 Resource not found No Content 500 Internal service error No Content 503 Service unavailable No Content 1.9.1.2.3.4. Tags cluster.open-cluster-management.io 1.9.1.2.4. Delete a cluster 1.9.1.2.4.1. Description Delete a single cluster 1.9.1.2.4.2. Parameters Type Name Description Schema Header COOKIE required Authorization: Bearer {ACCESS_TOKEN} ; ACCESS_TOKEN is the user access token. string Path cluster_name required Name of the cluster that you want to delete. string 1.9.1.2.4.3. Responses HTTP Code Description Schema 200 Success No Content 403 Access forbidden No Content 404 Resource not found No Content 500 Internal service error No Content 503 Service unavailable No Content 1.9.1.2.4.4. Tags cluster.open-cluster-management.io 1.9.1.3. Definitions 1.9.1.3.1. Cluster Name Schema apiVersion required string kind required string metadata required object spec required spec spec Name Schema hubAcceptsClient required bool managedClusterClientConfigs optional < managedClusterClientConfigs > array leaseDurationSeconds optional integer (int32) managedClusterClientConfigs Name Description Schema URL required string CABundle optional Pattern : "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?USD" string (byte) 1.9.2. Clustersets API (v1beta2) 1.9.2.1. Overview This documentation is for the Clusterset resource for multicluster engine for Kubernetes operator. Clusterset resource has four possible requests: create, query, delete and update. 1.9.2.1.1. URI scheme BasePath : /kubernetes/apis Schemes : HTTPS 1.9.2.1.2. Tags cluster.open-cluster-management.io : Create and manage Clustersets 1.9.2.2. Paths 1.9.2.2.1. Query all clustersets 1.9.2.2.1.1. Description Query your Clustersets for more details. 1.9.2.2.1.2. Parameters Type Name Description Schema Header COOKIE required Authorization: Bearer {ACCESS_TOKEN} ; ACCESS_TOKEN is the user access token. string 1.9.2.2.1.3. Responses HTTP Code Description Schema 200 Success No Content 403 Access forbidden No Content 404 Resource not found No Content 500 Internal service error No Content 503 Service unavailable No Content 1.9.2.2.1.4. Consumes clusterset/yaml 1.9.2.2.1.5. Tags cluster.open-cluster-management.io 1.9.2.2.2. Create a clusterset 1.9.2.2.2.1. Description Create a Clusterset. 1.9.2.2.2.2. Parameters Type Name Description Schema Header COOKIE required Authorization: Bearer {ACCESS_TOKEN} ; ACCESS_TOKEN is the user access token. string Body body required Parameters describing the clusterset to be created. Clusterset 1.9.2.2.2.3. Responses HTTP Code Description Schema 200 Success No Content 403 Access forbidden No Content 404 Resource not found No Content 500 Internal service error No Content 503 Service unavailable No Content 1.9.2.2.2.4. Consumes clusterset/yaml 1.9.2.2.2.5. Tags cluster.open-cluster-management.io 1.9.2.2.2.6. Example HTTP request 1.9.2.2.2.6.1. Request body { "apiVersion" : "cluster.open-cluster-management.io/v1beta2", "kind" : "ManagedClusterSet", "metadata" : { "name" : "clusterset1" }, "spec": { }, "status" : { } } 1.9.2.2.3. Query a single clusterset 1.9.2.2.3.1. Description Query a single clusterset for more details. 1.9.2.2.3.2. Parameters Type Name Description Schema Header COOKIE required Authorization: Bearer {ACCESS_TOKEN} ; ACCESS_TOKEN is the user access token. string Path clusterset_name required Name of the clusterset that you want to query. string 1.9.2.2.3.3. Responses HTTP Code Description Schema 200 Success No Content 403 Access forbidden No Content 404 Resource not found No Content 500 Internal service error No Content 503 Service unavailable No Content 1.9.2.2.3.4. Tags cluster.open-cluster-management.io 1.9.2.2.4. Delete a clusterset 1.9.2.2.4.1. Description Delete a single clusterset. 1.9.2.2.4.2. Parameters Type Name Description Schema Header COOKIE required Authorization: Bearer {ACCESS_TOKEN} ; ACCESS_TOKEN is the user access token. string Path clusterset_name required Name of the clusterset that you want to delete. string 1.9.2.2.4.3. Responses HTTP Code Description Schema 200 Success No Content 403 Access forbidden No Content 404 Resource not found No Content 500 Internal service error No Content 503 Service unavailable No Content 1.9.2.2.4.4. Tags cluster.open-cluster-management.io 1.9.2.3. Definitions 1.9.2.3.1. Clusterset Name Schema apiVersion required string kind required string metadata required object 1.9.3. Clustersetbindings API (v1beta2) 1.9.3.1. Overview This documentation is for the clustersetbinding resource for multicluster engine for Kubernetes. Clustersetbinding resource has four possible requests: create, query, delete and update. 1.9.3.1.1. URI scheme BasePath : /kubernetes/apis Schemes : HTTPS 1.9.3.1.2. Tags cluster.open-cluster-management.io : Create and manage clustersetbindings 1.9.3.2. Paths 1.9.3.2.1. Query all clustersetbindings 1.9.3.2.1.1. Description Query your clustersetbindings for more details. 1.9.3.2.1.2. Parameters Type Name Description Schema Header COOKIE required Authorization: Bearer {ACCESS_TOKEN} ; ACCESS_TOKEN is the user access token. string Path namespace required Namespace that you want to use, for example, default. string 1.9.3.2.1.3. Responses HTTP Code Description Schema 200 Success No Content 403 Access forbidden No Content 404 Resource not found No Content 500 Internal service error No Content 503 Service unavailable No Content 1.9.3.2.1.4. Consumes clustersetbinding/yaml 1.9.3.2.1.5. Tags cluster.open-cluster-management.io 1.9.3.2.2. Create a clustersetbinding 1.9.3.2.2.1. Description Create a clustersetbinding. 1.9.3.2.2.2. Parameters Type Name Description Schema Header COOKIE required Authorization: Bearer {ACCESS_TOKEN} ; ACCESS_TOKEN is the user access token. string Path namespace required Namespace that you want to use, for example, default. string Body body required Parameters describing the clustersetbinding to be created. Clustersetbinding 1.9.3.2.2.3. Responses HTTP Code Description Schema 200 Success No Content 403 Access forbidden No Content 404 Resource not found No Content 500 Internal service error No Content 503 Service unavailable No Content 1.9.3.2.2.4. Consumes clustersetbinding/yaml 1.9.3.2.2.5. Tags cluster.open-cluster-management.io 1.9.3.2.2.6. Example HTTP request 1.9.3.2.2.6.1. Request body { "apiVersion" : "cluster.open-cluster-management.io/v1", "kind" : "ManagedClusterSetBinding", "metadata" : { "name" : "clusterset1", "namespace" : "ns1" }, "spec": { "clusterSet": "clusterset1" }, "status" : { } } 1.9.3.2.3. Query a single clustersetbinding 1.9.3.2.3.1. Description Query a single clustersetbinding for more details. 1.9.3.2.3.2. Parameters Type Name Description Schema Header COOKIE required Authorization: Bearer {ACCESS_TOKEN} ; ACCESS_TOKEN is the user access token. string Path namespace required Namespace that you want to use, for example, default. string Path clustersetbinding_name required Name of the clustersetbinding that you want to query. string 1.9.3.2.3.3. Responses HTTP Code Description Schema 200 Success No Content 403 Access forbidden No Content 404 Resource not found No Content 500 Internal service error No Content 503 Service unavailable No Content 1.9.3.2.3.4. Tags cluster.open-cluster-management.io 1.9.3.2.4. Delete a clustersetbinding 1.9.3.2.4.1. Description Delete a single clustersetbinding. 1.9.3.2.4.2. Parameters Type Name Description Schema Header COOKIE required Authorization: Bearer {ACCESS_TOKEN} ; ACCESS_TOKEN is the user access token. string Path namespace required Namespace that you want to use, for example, default. string Path clustersetbinding_name required Name of the clustersetbinding that you want to delete. string 1.9.3.2.4.3. Responses HTTP Code Description Schema 200 Success No Content 403 Access forbidden No Content 404 Resource not found No Content 500 Internal service error No Content 503 Service unavailable No Content 1.9.3.2.4.4. Tags cluster.open-cluster-management.io 1.9.3.3. Definitions 1.9.3.3.1. Clustersetbinding Name Schema apiVersion required string kind required string metadata required object spec required spec spec Name Schema clusterSet required string 1.9.4. Clusterview API (v1alpha1) 1.9.4.1. Overview This documentation is for the clusterview resource for multicluster engine for Kubernetes. The clusterview resource provides a CLI command that enables you to view a list of the managed clusters and managed cluster sets that that you can access. The three possible requests are: list, get, and watch. 1.9.4.1.1. URI scheme BasePath : /kubernetes/apis Schemes : HTTPS 1.9.4.1.2. Tags clusterview.open-cluster-management.io : View a list of managed clusters that your ID can access. 1.9.4.2. Paths 1.9.4.2.1. Get managed clusters 1.9.4.2.1.1. Description View a list of the managed clusters that you can access. 1.9.4.2.1.2. Parameters Type Name Description Schema Header COOKIE required Authorization: Bearer {ACCESS_TOKEN} ; ACCESS_TOKEN is the user access token. string 1.9.4.2.1.3. Responses HTTP Code Description Schema 200 Success No Content 403 Access forbidden No Content 404 Resource not found No Content 500 Internal service error No Content 503 Service unavailable No Content 1.9.4.2.1.4. Consumes managedcluster/yaml 1.9.4.2.1.5. Tags clusterview.open-cluster-management.io 1.9.4.2.2. List managed clusters 1.9.4.2.2.1. Description View a list of the managed clusters that you can access. 1.9.4.2.2.2. Parameters Type Name Description Schema Header COOKIE required Authorization: Bearer {ACCESS_TOKEN} ; ACCESS_TOKEN is the user access token. string Body body optional Name of the user ID for which you want to list the managed clusters. string 1.9.4.2.2.3. Responses HTTP Code Description Schema 200 Success No Content 403 Access forbidden No Content 404 Resource not found No Content 500 Internal service error No Content 503 Service unavailable No Content 1.9.4.2.2.4. Consumes managedcluster/yaml 1.9.4.2.2.5. Tags clusterview.open-cluster-management.io 1.9.4.2.2.6. Example HTTP request 1.9.4.2.2.6.1. Request body { "apiVersion" : "clusterview.open-cluster-management.io/v1alpha1", "kind" : "ClusterView", "metadata" : { "name" : "<user_ID>" }, "spec": { }, "status" : { } } 1.9.4.2.3. Watch the managed cluster sets 1.9.4.2.3.1. Description Watch the managed clusters that you can access. 1.9.4.2.3.2. Parameters Type Name Description Schema Header COOKIE required Authorization: Bearer {ACCESS_TOKEN} ; ACCESS_TOKEN is the user access token. string Path clusterview_name optional Name of the user ID that you want to watch. string 1.9.4.2.3.3. Responses HTTP Code Description Schema 200 Success No Content 403 Access forbidden No Content 404 Resource not found No Content 500 Internal service error No Content 503 Service unavailable No Content 1.9.4.2.4. List the managed cluster sets. 1.9.4.2.4.1. Description List the managed clusters that you can access. 1.9.4.2.4.2. Parameters Type Name Description Schema Header COOKIE required Authorization: Bearer {ACCESS_TOKEN} ; ACCESS_TOKEN is the user access token. string Path clusterview_name optional Name of the user ID that you want to watch. string 1.9.4.2.4.3. Responses HTTP Code Description Schema 200 Success No Content 403 Access forbidden No Content 404 Resource not found No Content 500 Internal service error No Content 503 Service unavailable No Content 1.9.4.2.5. List the managed cluster sets. 1.9.4.2.5.1. Description List the managed clusters that you can access. 1.9.4.2.5.2. Parameters Type Name Description Schema Header COOKIE required Authorization: Bearer {ACCESS_TOKEN} ; ACCESS_TOKEN is the user access token. string Path clusterview_name optional Name of the user ID that you want to watch. string 1.9.4.2.5.3. Responses HTTP Code Description Schema 200 Success No Content 403 Access forbidden No Content 404 Resource not found No Content 500 Internal service error No Content 503 Service unavailable No Content 1.9.4.2.6. Watch the managed cluster sets. 1.9.4.2.6.1. Description Watch the managed clusters that you can access. 1.9.4.2.6.2. Parameters Type Name Description Schema Header COOKIE required Authorization: Bearer {ACCESS_TOKEN} ; ACCESS_TOKEN is the user access token. string Path clusterview_name optional Name of the user ID that you want to watch. string 1.9.4.2.6.3. Responses HTTP Code Description Schema 200 Success No Content 403 Access forbidden No Content 404 Resource not found No Content 500 Internal service error No Content 503 Service unavailable No Content 1.9.5. ManagedServiceAccount API (v1alpha1) (Deprecated) 1.9.5.1. Overview This documentation is for the ManagedServiceAccount resource for the multicluster engine operator. The ManagedServiceAccount resource has four possible requests: create, query, delete, and update. Deprecated: The v1alpha1 API is deprecated. For best results, use v1beta1 instead. 1.9.5.1.1. URI scheme BasePath : /kubernetes/apis Schemes : HTTPS 1.9.5.1.2. Tags managedserviceaccounts.authentication.open-cluster-management.io` : Create and manage ManagedServiceAccounts 1.9.5.2. Paths 1.9.5.2.1. Create a ManagedServiceAccount 1.9.5.2.1.1. Description Create a ManagedServiceAccount . 1.9.5.2.1.2. Parameters Type Name Description Schema Header COOKIE required Authorization: Bearer {ACCESS_TOKEN} ; ACCESS_TOKEN is the user access token. string Body body required Parameters describing the ManagedServiceAccount to be created. ManagedServiceAccount 1.9.5.2.1.3. Responses HTTP Code Description Schema 200 Success No Content 403 Access forbidden No Content 404 Resource not found No Content 500 Internal service error No Content 503 Service unavailable No Content 1.9.5.2.1.4. Consumes managedserviceaccount/yaml 1.9.5.2.1.5. Tags managedserviceaccounts.authentication.open-cluster-management.io 1.9.5.2.1.5.1. Request body apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.14.0 name: managedserviceaccounts.authentication.open-cluster-management.io spec: group: authentication.open-cluster-management.io names: kind: ManagedServiceAccount listKind: ManagedServiceAccountList plural: managedserviceaccounts singular: managedserviceaccount scope: Namespaced versions: - deprecated: true deprecationWarning: authentication.open-cluster-management.io/v1alpha1 ManagedServiceAccount is deprecated; use authentication.open-cluster-management.io/v1beta1 ManagedServiceAccount; version v1alpha1 will be removed in the release name: v1alpha1 schema: openAPIV3Schema: description: ManagedServiceAccount is the Schema for the managedserviceaccounts API properties: apiVersion: description: |- APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: description: |- Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: description: ManagedServiceAccountSpec defines the desired state of ManagedServiceAccount properties: rotation: description: Rotation is the policy for rotation the credentials. properties: enabled: default: true description: |- Enabled prescribes whether the ServiceAccount token will be rotated from the upstream type: boolean validity: default: 8640h0m0s description: Validity is the duration for which the signed ServiceAccount token is valid. type: string type: object ttlSecondsAfterCreation: description: |- ttlSecondsAfterCreation limits the lifetime of a ManagedServiceAccount. If the ttlSecondsAfterCreation field is set, the ManagedServiceAccount will be automatically deleted regardless of the ManagedServiceAccount's status. When the ManagedServiceAccount is deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the ManagedServiceAccount won't be automatically deleted. If this field is set to zero, the ManagedServiceAccount becomes eligible for deletion immediately after its creation. In order to use ttlSecondsAfterCreation, the EphemeralIdentity feature gate must be enabled. exclusiveMinimum: true format: int32 minimum: 0 type: integer required: - rotation type: object status: description: ManagedServiceAccountStatus defines the observed state of ManagedServiceAccount properties: conditions: description: Conditions is the condition list. items: description: "Condition contains details for one aspect of the current state of this API Resource.\n---\nThis struct is intended for direct use as an array at the field path .status.conditions. For example,\n\n\n\ttype FooStatus struct{\n\t // Represents the observations of a foo's current state.\n\t // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t \ // other fields\n\t}" properties: lastTransitionTime: description: |- lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: description: |- message is a human readable message indicating details about the transition. This may be an empty string. maxLength: 32768 type: string observedGeneration: description: |- observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: description: |- reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?USD type: string status: description: status of the condition, one of True, False, Unknown. enum: - "True" - "False" - Unknown type: string type: description: |- type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])USD type: string required: - lastTransitionTime - message - reason - status - type type: object type: array expirationTimestamp: description: ExpirationTimestamp is the time when the token will expire. format: date-time type: string tokenSecretRef: description: |- TokenSecretRef is a reference to the corresponding ServiceAccount's Secret, which stores the CA certficate and token from the managed cluster. properties: lastRefreshTimestamp: description: |- LastRefreshTimestamp is the timestamp indicating when the token in the Secret is refreshed. format: date-time type: string name: description: Name is the name of the referenced secret. type: string required: - lastRefreshTimestamp - name type: object type: object type: object served: true storage: false subresources: status: {} - name: v1beta1 schema: openAPIV3Schema: description: ManagedServiceAccount is the Schema for the managedserviceaccounts API properties: apiVersion: description: |- APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: description: |- Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: description: ManagedServiceAccountSpec defines the desired state of ManagedServiceAccount properties: rotation: description: Rotation is the policy for rotation the credentials. properties: enabled: default: true description: |- Enabled prescribes whether the ServiceAccount token will be rotated before it expires. Deprecated: All ServiceAccount tokens will be rotated before they expire regardless of this field. type: boolean validity: default: 8640h0m0s description: Validity is the duration of validity for requesting the signed ServiceAccount token. type: string type: object ttlSecondsAfterCreation: description: |- ttlSecondsAfterCreation limits the lifetime of a ManagedServiceAccount. If the ttlSecondsAfterCreation field is set, the ManagedServiceAccount will be automatically deleted regardless of the ManagedServiceAccount's status. When the ManagedServiceAccount is deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the ManagedServiceAccount won't be automatically deleted. If this field is set to zero, the ManagedServiceAccount becomes eligible for deletion immediately after its creation. In order to use ttlSecondsAfterCreation, the EphemeralIdentity feature gate must be enabled. exclusiveMinimum: true format: int32 minimum: 0 type: integer required: - rotation type: object status: description: ManagedServiceAccountStatus defines the observed state of ManagedServiceAccount properties: conditions: description: Conditions is the condition list. items: description: "Condition contains details for one aspect of the current state of this API Resource.\n---\nThis struct is intended for direct use as an array at the field path .status.conditions. For example,\n\n\n\ttype FooStatus struct{\n\t // Represents the observations of a foo's current state.\n\t // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t \ // other fields\n\t}" properties: lastTransitionTime: description: |- lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: description: |- message is a human readable message indicating details about the transition. This may be an empty string. maxLength: 32768 type: string observedGeneration: description: |- observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: description: |- reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?USD type: string status: description: status of the condition, one of True, False, Unknown. enum: - "True" - "False" - Unknown type: string type: description: |- type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])USD type: string required: - lastTransitionTime - message - reason - status - type type: object type: array expirationTimestamp: description: ExpirationTimestamp is the time when the token will expire. format: date-time type: string tokenSecretRef: description: |- TokenSecretRef is a reference to the corresponding ServiceAccount's Secret, which stores the CA certficate and token from the managed cluster. properties: lastRefreshTimestamp: description: |- LastRefreshTimestamp is the timestamp indicating when the token in the Secret is refreshed. format: date-time type: string name: description: Name is the name of the referenced secret. type: string required: - lastRefreshTimestamp - name type: object type: object type: object served: true storage: true subresources: status: {} 1.9.5.2.2. Query a single ManagedServiceAccount 1.9.5.2.2.1. Description Query a single ManagedServiceAccount for more details. 1.9.5.2.2.2. Parameters Type Name Description Schema Header COOKIE required Authorization: Bearer {ACCESS_TOKEN} ; ACCESS_TOKEN is the user access token. string Path managedserviceaccount_name required Name of the ManagedServiceAccount that you want to query. string 1.9.5.2.2.3. Responses HTTP Code Description Schema 200 Success No Content 403 Access forbidden No Content 404 Resource not found No Content 500 Internal service error No Content 503 Service unavailable No Content 1.9.5.2.2.4. Tags managedserviceaccounts.authentication.open-cluster-management.io 1.9.5.2.3. Delete a ManagedServiceAccount 1.9.5.2.3.1. Description Delete a single ManagedServiceAccount . 1.9.5.2.3.2. Parameters Type Name Description Schema Header COOKIE required Authorization: Bearer {ACCESS_TOKEN} ; ACCESS_TOKEN is the user access token. string Path managedserviceaccount_name required Name of the ManagedServiceAccount that you want to delete. string 1.9.5.2.3.3. Responses HTTP Code Description Schema 200 Success No Content 403 Access forbidden No Content 404 Resource not found No Content 500 Internal service error No Content 503 Service unavailable No Content 1.9.5.2.3.4. Tags managedserviceaccounts.authentication.open-cluster-management.io 1.9.5.3. Definitions 1.9.5.3.1. ManagedServiceAccount Name Description Schema apiVersion required The versioned schema of the ManagedServiceAccount . string kind required String value that represents the REST resource. string metadata required The meta data of the ManagedServiceAccount . object spec required The specification of the ManagedServiceAccount . 1.9.6. MultiClusterEngine API (v1alpha1) 1.9.6.1. Overview This documentation is for the MultiClusterEngine resource for multicluster engine for Kubernetes. The MultiClusterEngine resource has four possible requests: create, query, delete, and update. 1.9.6.1.1. URI scheme BasePath : /kubernetes/apis Schemes : HTTPS 1.9.6.1.2. Tags multiclusterengines.multicluster.openshift.io : Create and manage MultiClusterEngines 1.9.6.2. Paths 1.9.6.2.1. Create a MultiClusterEngine 1.9.6.2.1.1. Description Create a MultiClusterEngine. 1.9.6.2.1.2. Parameters Type Name Description Schema Header COOKIE required Authorization: Bearer {ACCESS_TOKEN} ; ACCESS_TOKEN is the user access token. string Body body required Parameters describing the MultiClusterEngine to be created. MultiClusterEngine 1.9.6.2.1.3. Responses HTTP Code Description Schema 200 Success No Content 403 Access forbidden No Content 404 Resource not found No Content 500 Internal service error No Content 503 Service unavailable No Content 1.9.6.2.1.4. Consumes MultiClusterEngines/yaml 1.9.6.2.1.5. Tags multiclusterengines.multicluster.openshift.io 1.9.6.2.1.5.1. Request body { "apiVersion": "apiextensions.k8s.io/v1", "kind": "CustomResourceDefinition", "metadata": { "annotations": { "controller-gen.kubebuilder.io/version": "v0.4.1" }, "creationTimestamp": null, "name": "multiclusterengines.multicluster.openshift.io" }, "spec": { "group": "multicluster.openshift.io", "names": { "kind": "MultiClusterEngine", "listKind": "MultiClusterEngineList", "plural": "multiclusterengines", "shortNames": [ "mce" ], "singular": "multiclusterengine" }, "scope": "Cluster", "versions": [ { "additionalPrinterColumns": [ { "description": "The overall state of the MultiClusterEngine", "jsonPath": ".status.phase", "name": "Status", "type": "string" }, { "jsonPath": ".metadata.creationTimestamp", "name": "Age", "type": "date" } ], "name": "v1alpha1", "schema": { "openAPIV3Schema": { "description": "MultiClusterEngine is the Schema for the multiclusterengines\nAPI", "properties": { "apiVersion": { "description": "APIVersion defines the versioned schema of this representation\nof an object. Servers should convert recognized schemas to the latest\ninternal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", "type": "string" }, "kind": { "description": "Kind is a string value representing the REST resource this\nobject represents. Servers may infer this from the endpoint the client\nsubmits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "type": "string" }, "metadata": { "type": "object" }, "spec": { "description": "MultiClusterEngineSpec defines the desired state of MultiClusterEngine", "properties": { "imagePullSecret": { "description": "Override pull secret for accessing MultiClusterEngine\noperand and endpoint images", "type": "string" }, "nodeSelector": { "additionalProperties": { "type": "string" }, "description": "Set the nodeselectors", "type": "object" }, "targetNamespace": { "description": "Location where MCE resources will be placed", "type": "string" }, "tolerations": { "description": "Tolerations causes all components to tolerate any taints.", "items": { "description": "The pod this Toleration is attached to tolerates any\ntaint that matches the triple <key,value,effect> using the matching\noperator <operator>.", "properties": { "effect": { "description": "Effect indicates the taint effect to match. Empty\nmeans match all taint effects. When specified, allowed values\nare NoSchedule, PreferNoSchedule and NoExecute.", "type": "string" }, "key": { "description": "Key is the taint key that the toleration applies\nto. Empty means match all taint keys. If the key is empty,\noperator must be Exists; this combination means to match all\nvalues and all keys.", "type": "string" }, "operator": { "description": "Operator represents a key's relationship to the\nvalue. Valid operators are Exists and Equal. Defaults to Equal.\nExists is equivalent to wildcard for value, so that a pod\ncan tolerate all taints of a particular category.", "type": "string" }, "tolerationSeconds": { "description": "TolerationSeconds represents the period of time\nthe toleration (which must be of effect NoExecute, otherwise\nthis field is ignored) tolerates the taint. By default, it\nis not set, which means tolerate the taint forever (do not\nevict). Zero and negative values will be treated as 0 (evict\nimmediately) by the system.", "format": "int64", "type": "integer" }, "value": { "description": "Value is the taint value the toleration matches\nto. If the operator is Exists, the value should be empty,\notherwise just a regular string.", "type": "string" } }, "type": "object" }, "type": "array" } }, "type": "object" }, "status": { "description": "MultiClusterEngineStatus defines the observed state of MultiClusterEngine", "properties": { "components": { "items": { "description": "ComponentCondition contains condition information for\ntracked components", "properties": { "kind": { "description": "The resource kind this condition represents", "type": "string" }, "lastTransitionTime": { "description": "LastTransitionTime is the last time the condition\nchanged from one status to another.", "format": "date-time", "type": "string" }, "message": { "description": "Message is a human-readable message indicating\ndetails about the last status change.", "type": "string" }, "name": { "description": "The component name", "type": "string" }, "reason": { "description": "Reason is a (brief) reason for the condition's\nlast status change.", "type": "string" }, "status": { "description": "Status is the status of the condition. One of True,\nFalse, Unknown.", "type": "string" }, "type": { "description": "Type is the type of the cluster condition.", "type": "string" } }, "type": "object" }, "type": "array" }, "conditions": { "items": { "properties": { "lastTransitionTime": { "description": "LastTransitionTime is the last time the condition\nchanged from one status to another.", "format": "date-time", "type": "string" }, "lastUpdateTime": { "description": "The last time this condition was updated.", "format": "date-time", "type": "string" }, "message": { "description": "Message is a human-readable message indicating\ndetails about the last status change.", "type": "string" }, "reason": { "description": "Reason is a (brief) reason for the condition's\nlast status change.", "type": "string" }, "status": { "description": "Status is the status of the condition. One of True,\nFalse, Unknown.", "type": "string" }, "type": { "description": "Type is the type of the cluster condition.", "type": "string" } }, "type": "object" }, "type": "array" }, "phase": { "description": "Latest observed overall state", "type": "string" } }, "type": "object" } }, "type": "object" } }, "served": true, "storage": true, "subresources": { "status": {} } } ] }, "status": { "acceptedNames": { "kind": "", "plural": "" }, "conditions": [], "storedVersions": [] } } 1.9.6.2.2. Query all MultiClusterEngines 1.9.6.2.2.1. Description Query your multicluster engine for more details. 1.9.6.2.2.2. Parameters Type Name Description Schema Header COOKIE required Authorization: Bearer {ACCESS_TOKEN} ; ACCESS_TOKEN is the user access token. string 1.9.6.2.2.3. Responses HTTP Code Description Schema 200 Success No Content 403 Access forbidden No Content 404 Resource not found No Content 500 Internal service error No Content 503 Service unavailable No Content 1.9.6.2.2.4. Consumes operator/yaml 1.9.6.2.2.5. Tags multiclusterengines.multicluster.openshift.io 1.9.6.2.3. Delete a MultiClusterEngine operator 1.9.6.2.3.1. Parameters Type Name Description Schema Header COOKIE required Authorization: Bearer {ACCESS_TOKEN} ; ACCESS_TOKEN is the user access token. string Path name required Name of the multiclusterengine that you want to delete. string 1.9.6.2.3.2. Responses HTTP Code Description Schema 200 Success No Content 403 Access forbidden No Content 404 Resource not found No Content 500 Internal service error No Content 503 Service unavailable No Content 1.9.6.2.3.3. Tags multiclusterengines.multicluster.openshift.io 1.9.6.3. Definitions 1.9.6.3.1. MultiClusterEngine Name Description Schema apiVersion required The versioned schema of the MultiClusterEngines. string kind required String value that represents the REST resource. string metadata required Describes rules that define the resource. object spec required MultiClusterEngineSpec defines the desired state of MultiClusterEngine. See List of specs 1.9.6.3.2. List of specs Name Description Schema nodeSelector optional Set the nodeselectors. map[string]string imagePullSecret optional Override pull secret for accessing MultiClusterEngine operand and endpoint images. string tolerations optional Tolerations causes all components to tolerate any taints. []corev1.Toleration targetNamespace optional Location where MCE resources will be placed. string 1.9.7. Placements API (v1beta1) 1.9.7.1. Overview This documentation is for the Placement resource for multicluster engine for Kubernetes. Placement resource has four possible requests: create, query, delete and update. 1.9.7.1.1. URI scheme BasePath : /kubernetes/apis Schemes : HTTPS 1.9.7.1.2. Tags cluster.open-cluster-management.io : Create and manage Placements 1.9.7.2. Paths 1.9.7.2.1. Query all Placements 1.9.7.2.1.1. Description Query your Placements for more details. 1.9.7.2.1.2. Parameters Type Name Description Schema Header COOKIE required Authorization: Bearer {ACCESS_TOKEN} ; ACCESS_TOKEN is the user access token. string 1.9.7.2.1.3. Responses HTTP Code Description Schema 200 Success No Content 403 Access forbidden No Content 404 Resource not found No Content 500 Internal service error No Content 503 Service unavailable No Content 1.9.7.2.1.4. Consumes placement/yaml 1.9.7.2.1.5. Tags cluster.open-cluster-management.io 1.9.7.2.2. Create a Placement 1.9.7.2.2.1. Description Create a Placement. 1.9.7.2.2.2. Parameters Type Name Description Schema Header COOKIE required Authorization: Bearer {ACCESS_TOKEN} ; ACCESS_TOKEN is the user access token. string Body body required Parameters describing the placement to be created. Placement 1.9.7.2.2.3. Responses HTTP Code Description Schema 200 Success No Content 403 Access forbidden No Content 404 Resource not found No Content 500 Internal service error No Content 503 Service unavailable No Content 1.9.7.2.2.4. Consumes placement/yaml 1.9.7.2.2.5. Tags cluster.open-cluster-management.io 1.9.7.2.2.6. Example HTTP request 1.9.7.2.2.6.1. Request body { "apiVersion" : "cluster.open-cluster-management.io/v1beta1", "kind" : "Placement", "metadata" : { "name" : "placement1", "namespace": "ns1" }, "spec": { "predicates": [ { "requiredClusterSelector": { "labelSelector": { "matchLabels": { "vendor": "OpenShift" } } } } ] }, "status" : { } } 1.9.7.2.3. Query a single Placement 1.9.7.2.3.1. Description Query a single Placement for more details. 1.9.7.2.3.2. Parameters Type Name Description Schema Header COOKIE required Authorization: Bearer {ACCESS_TOKEN} ; ACCESS_TOKEN is the user access token. string Path placement_name required Name of the Placement that you want to query. string 1.9.7.2.3.3. Responses HTTP Code Description Schema 200 Success No Content 403 Access forbidden No Content 404 Resource not found No Content 500 Internal service error No Content 503 Service unavailable No Content 1.9.7.2.3.4. Tags cluster.open-cluster-management.io 1.9.7.2.4. Delete a Placement 1.9.7.2.4.1. Description Delete a single Placement. 1.9.7.2.4.2. Parameters Type Name Description Schema Header COOKIE required Authorization: Bearer {ACCESS_TOKEN} ; ACCESS_TOKEN is the user access token. string Path placement_name required Name of the Placement that you want to delete. string 1.9.7.2.4.3. Responses HTTP Code Description Schema 200 Success No Content 403 Access forbidden No Content 404 Resource not found No Content 500 Internal service error No Content 503 Service unavailable No Content 1.9.7.2.4.4. Tags cluster.open-cluster-management.io 1.9.7.3. Definitions 1.9.7.3.1. Placement Name Description Schema apiVersion required The versioned schema of the Placement. string kind required String value that represents the REST resource. string metadata required The meta data of the Placement. object spec required The specification of the Placement. spec spec Name Description Schema ClusterSets optional A subset of ManagedClusterSets from which the ManagedClusters are selected. If it is empty, ManagedClusters is selected from the ManagedClusterSets that are bound to the Placement namespace. Otherwise, ManagedClusters are selected from the intersection of this subset and the ManagedClusterSets are bound to the placement namespace. string array numberOfClusters optional The desired number of ManagedClusters to be selected. integer (int32) predicates optional A subset of cluster predicates to select ManagedClusters. The conditional logic is OR . clusterPredicate array clusterPredicate Name Description Schema requiredClusterSelector optional A cluster selector to select ManagedClusters with a label and cluster claim. clusterSelector clusterSelector Name Description Schema labelSelector optional A selector of ManagedClusters by label. object claimSelector optional A selector of ManagedClusters by claim. clusterClaimSelector clusterClaimSelector Name Description Schema matchExpressions optional A subset of the cluster claim selector requirements. The conditional logic is AND . < object > array 1.9.8. PlacementDecisions API (v1beta1) 1.9.8.1. Overview This documentation is for the PlacementDecision resource for multicluster engine for Kubernetes. PlacementDecision resource has four possible requests: create, query, delete and update. 1.9.8.1.1. URI scheme BasePath : /kubernetes/apis Schemes : HTTPS 1.9.8.1.2. Tags cluster.open-cluster-management.io : Create and manage PlacementDecisions. 1.9.8.2. Paths 1.9.8.2.1. Query all PlacementDecisions 1.9.8.2.1.1. Description Query your PlacementDecisions for more details. 1.9.8.2.1.2. Parameters Type Name Description Schema Header COOKIE required Authorization: Bearer {ACCESS_TOKEN} ; ACCESS_TOKEN is the user access token. string 1.9.8.2.1.3. Responses HTTP Code Description Schema 200 Success No Content 403 Access forbidden No Content 404 Resource not found No Content 500 Internal service error No Content 503 Service unavailable No Content 1.9.8.2.1.4. Consumes placementdecision/yaml 1.9.8.2.1.5. Tags cluster.open-cluster-management.io 1.9.8.2.2. Create a PlacementDecision 1.9.8.2.2.1. Description Create a PlacementDecision. 1.9.8.2.2.2. Parameters Type Name Description Schema Header COOKIE required Authorization: Bearer {ACCESS_TOKEN} ; ACCESS_TOKEN is the user access token. string Body body required Parameters describing the PlacementDecision to be created. PlacementDecision 1.9.8.2.2.3. Responses HTTP Code Description Schema 200 Success No Content 403 Access forbidden No Content 404 Resource not found No Content 500 Internal service error No Content 503 Service unavailable No Content 1.9.8.2.2.4. Consumes placementdecision/yaml 1.9.8.2.2.5. Tags cluster.open-cluster-management.io 1.9.8.2.2.6. Example HTTP request 1.9.8.2.2.6.1. Request body { "apiVersion" : "cluster.open-cluster-management.io/v1beta1", "kind" : "PlacementDecision", "metadata" : { "labels" : { "cluster.open-cluster-management.io/placement" : "placement1" }, "name" : "placement1-decision1", "namespace": "ns1" }, "status" : { } } 1.9.8.2.3. Query a single PlacementDecision 1.9.8.2.3.1. Description Query a single PlacementDecision for more details. 1.9.8.2.3.2. Parameters Type Name Description Schema Header COOKIE required Authorization: Bearer {ACCESS_TOKEN} ; ACCESS_TOKEN is the user access token. string Path placementdecision_name required Name of the PlacementDecision that you want to query. string 1.9.8.2.3.3. Responses HTTP Code Description Schema 200 Success No Content 403 Access forbidden No Content 404 Resource not found No Content 500 Internal service error No Content 503 Service unavailable No Content 1.9.8.2.3.4. Tags cluster.open-cluster-management.io 1.9.8.2.4. Delete a PlacementDecision 1.9.8.2.4.1. Description Delete a single PlacementDecision. 1.9.8.2.4.2. Parameters Type Name Description Schema Header COOKIE required Authorization: Bearer {ACCESS_TOKEN} ; ACCESS_TOKEN is the user access token. string Path placementdecision_name required Name of the PlacementDecision that you want to delete. string 1.9.8.2.4.3. Responses HTTP Code Description Schema 200 Success No Content 403 Access forbidden No Content 404 Resource not found No Content 500 Internal service error No Content 503 Service unavailable No Content 1.9.8.2.4.4. Tags cluster.open-cluster-management.io 1.9.8.3. Definitions 1.9.8.3.1. PlacementDecision Name Description Schema apiVersion required The versioned schema of PlacementDecision. string kind required String value that represents the REST resource. string metadata required The meta data of PlacementDecision. object 1.10. Troubleshooting Before using the Troubleshooting guide, you can run the oc adm must-gather command to gather details, logs, and take steps in debugging issues. For more details, see Running the must-gather command to troubleshoot . Additionally, check your role-based access. See multicluster engine operator Role-based access control for details. 1.10.1. Documented troubleshooting View the list of troubleshooting topics for the multicluster engine operator: Installation: To view the main documentation for the installing tasks, see Installing and upgrading multicluster engine operator . Troubleshooting installation status stuck in installing or pending Troubleshooting reinstallation failure Cluster management: To view the main documentation about managing your clusters, see Cluster lifecycle introduction . Troubleshooting adding day-two nodes to an existing cluster fails with pending user action Troubleshooting an offline cluster Troubleshooting a managed cluster import failure Reimporting cluster fails with unknown authority error Troubleshooting cluster with pending import status Troubleshooting imported clusters offline after certificate change Troubleshooting cluster status changing from offline to available Troubleshooting cluster creation on VMware vSphere Troubleshooting cluster in console with pending or failed status Troubleshooting OpenShift Container Platform version 3.11 cluster import failure Troubleshooting Klusterlet with degraded conditions Namespace remains after deleting a cluster Auto-import-secret-exists error when importing a cluster Troubleshooting missing PlacementDecision after creating Placement Troubleshooting a discovery failure of bare metal hosts on Dell hardware Troubleshooting Minimal ISO boot failures Troubleshooting installation status stuck at Installing on a ROSA with hosted control planes cluster Troubleshooting all managed clusters become Unknown on a ROSA with hosted control planes cluster Troubleshooting an attempt to upgrade managed cluster with missing OpenShift Container Platform version 1.10.2. Running the must-gather command to troubleshoot To get started with troubleshooting, learn about the troubleshooting scenarios for users to run the must-gather command to debug the issues, then see the procedures to start using the command. Required access: Cluster administrator 1.10.2.1. Must-gather scenarios Scenario one: Use the Documented troubleshooting section to see if a solution to your problem is documented. The guide is organized by the major functions of the product. With this scenario, you check the guide to see if your solution is in the documentation. Scenario two: If your problem is not documented with steps to resolve, run the must-gather command and use the output to debug the issue. Scenario three: If you cannot debug the issue using your output from the must-gather command, then share your output with Red Hat Support. 1.10.2.2. Must-gather procedure See the following procedure to start using the must-gather command: Learn about the must-gather command and install the prerequisites that you need at Gathering data about your cluster in the OpenShift Container Platform documentation. Log in to your cluster. For the usual use-case, you should run the must-gather while you are logged into your engine cluster. Note: If you want to check your managed clusters, find the gather-managed.log file that is located in the cluster-scoped-resources directory: Check for managed clusters that are not set True for the JOINED and AVAILABLE column. You can run the must-gather command on those clusters that are not connected with True status. Add the multicluster engine for Kubernetes image that is used for gathering data and the directory. Run the following command: Go to your specified directory to see your output, which is organized in the following levels: Two peer levels: cluster-scoped-resources and namespace resources. Sub-level for each: API group for the custom resource definitions for both cluster-scope and namespace-scoped resources. level for each: YAML file sorted by kind . 1.10.2.3. Must-gather in a disconnected environment Complete the following steps to run the must-gather command in a disconnected environment: In a disconnected environment, mirror the Red Hat operator catalog images into their mirror registry. For more information, see Install on disconnected networks . Run the following command to extract logs, which reference the image from their mirror registry. Replace sha256 with the current image: You can open a Jira bug for the product team here . Running the must-gather command to troubleshoot 1.10.2.4. Must-gather for a hosted cluster If you experience issues with hosted control plane clusters, you can run the must-gather command to gather information for troubleshooting. Required access: Cluster administration 1.10.2.4.1. About the must-gather command for hosted clusters The must-gather command generates output for the management cluster and the hosted cluster. Learn more about the data that is collected. The following data is collected from the multicluster engine operator hub cluster: Cluster-scoped resources: These resources are node definitions of the management cluster. The hypershift-dump compressed file: This file is useful if you need to share the content with other people. Namespaced resources: These resources include all of the objects from the relevant namespaces, such as config maps, services, events, and logs. Network logs: These logs include the OVN northbound and southbound databases and the status for each one. Hosted clusters: This level of output involves all of the resources inside of the hosted cluster. The following data is collected data from the hosted cluster: Cluster-scoped resources: These resources include all of the cluster-wide objects, such as nodes and CRDs. Namespaced resources: These resources include all of the objects from the relevant namespaces, such as config maps, services, events, and logs. Although the output does not contain any secret objects from the cluster, it can contain references to the names of the secrets. 1.10.2.4.2. Prerequisites To gather information by running the must-gather command, you must meet the following prerequisites: You must ensure that the kubeconfig file is loaded and is pointing to the multicluster engine operator hub cluster. You must have the name value for the HostedCluster resource and the namespace where the custom resource is deployed. 1.10.2.4.3. Entering the must-gather command for hosted clusters See the following process to collect information from the must-gather command: Enter the following command to collect information about the hosted cluster. Replace <v2.x> with the version that you are using: The hosted-cluster-namespace=HOSTEDCLUSTERNAMESPACE parameter is optional. If you do not include the parameter, the command runs as though the hosted cluster is in the default namespace, which is clusters . To save the results of the command to a compressed file, run the same command and add the --dest-dir=NAME parameter, which is optional. Replace NAME with the name of the directory where you want to save the results. Replace <v2.x> with the version that you are using. See the following command with the optional parameter: 1.10.2.4.4. Entering the must-gather command in a disconnected environment Complete the following steps to run the must-gather command in a disconnected environment: In a disconnected environment, mirror the Red Hat operator catalog images into their mirror registry. For more information, see Install on disconnected networks . Run the following command to extract logs, which reference the image from their mirror registry: 1.10.2.4.5. Additional resources For more information about troubleshooting hosted control planes, see Troubleshooting hosted control planes in the OpenShift Container Platform documentation. 1.10.3. Troubleshooting: Adding day-two nodes to an existing cluster fails with pending user action Adding a node, or scaling out, to your existing cluster that is created by the multicluster engine for Kubernetes operator with Zero Touch Provisioning or Host inventory create methods fails during installation. The installation process works correctly during the Discovery phase, but fails on the installation phase. The configuration of the network is failing. From the hub cluster in the integrated console, you see a Pending user action. In the description, you can see it failing on the rebooting step. The error message about failing is not very accurate, since the agent that is running in the installing host cannot report information. 1.10.3.1. Symptom: Installation for day two workers fails After the Discover phase, the host reboots to continue the installation, but it cannot configure the network. Check for the following symptoms and messages: From the hub cluster in the integrated console, check for Pending user action on the adding node, with the Rebooting indicator: From the Red Hat OpenShift Container Platform configuration managed cluster, check the MachineConfigs of the existing cluster. Check if any of the MachineConfigs create any file on the following directories: /sysroot/etc/NetworkManager/system-connections/ /sysroot/etc/sysconfig/network-scripts/ From the terminal of the installing host, check the failing host for the following messages. You can use journalctl to see the log messages: If you get the last message in the log, the networking configuration is not propagated because it already found an existing network configuration on the folders previously listed in the Symptom . 1.10.3.2. Resolving the problem: Recreate the node merging network configuration Perform the following task to use a proper network configuration during the installation: Delete the node from your hub cluster. Repeat your process to install the node in the same way. Create the BareMetalHost object of the node with the following annotation: "bmac.agent-install.openshift.io/installer-args": "[\"--append-karg\", \"coreos.force_persist_ip\"]" The node starts the installation. After the Discovery phase, the node merges the network configuration between the changes on the existing cluster and the initial configuration. 1.10.4. Troubleshooting deletion failure of a hosted control plane cluster on the Agent platform When you destroy a hosted control plane cluster on the Agent platform, all the back-end resources are normally deleted. If the machine resources are not deleted properly, a cluster deletion fails. In that case, you must manually remove the remaining machine resources. 1.10.4.1. Symptom: An error occurs when destroying a hosted control plane cluster After you attempt to destroy the hosted control plane cluster on the Agent platform, the hcp destroy command fails with the following error: + 1.10.4.2. Resolving the problem: Remove the remaining machine resources manually Complete the following steps to destroy a hosted control plane cluster successfully on the Agent platform: Run the following command to see the list of remaining machine resources by replacing <hosted_cluster_namespace> with the name of hosted cluster namespace: See the following example output: Run the following command to remove the machine.cluster.x-k8s.io finalizer attached to machine resources: Run the following command to verify you receive the No resources found message on your terminal: Run the following command to destroy a hosted control plane cluster on the Agent platform: Replace <cluster_name> with the name of your cluster. 1.10.5. Troubleshooting installation status stuck in installing or pending When installing the multicluster engine operator, the MultiClusterEngine remains in Installing phase, or multiple pods maintain a Pending status. 1.10.5.1. Symptom: Stuck in Pending status More than ten minutes passed since you installed MultiClusterEngine and one or more components from the status.components field of the MultiClusterEngine resource report ProgressDeadlineExceeded . Resource constraints on the cluster might be the issue. Check the pods in the namespace where MultiClusterEngine was installed. You might see Pending with a status similar to the following: In this case, the worker nodes resources are not sufficient in the cluster to run the product. 1.10.5.2. Resolving the problem: Adjust worker node sizing If you have this problem, then your cluster needs to be updated with either larger or more worker nodes. See Sizing your cluster for guidelines on sizing your cluster. 1.10.6. Troubleshooting reinstallation failure When reinstalling multicluster engine operator, the pods do not start. 1.10.6.1. Symptom: Reinstallation failure If your pods do not start after you install the multicluster engine operator, it is often because items from a installation of multicluster engine operator were not removed correctly when it was uninstalled. In this case, the pods do not start after completing the installation process. 1.10.6.2. Resolving the problem: Reinstallation failure If you have this problem, complete the following steps: Run the uninstallation process to remove the current components by following the steps in Uninstalling . Install the Helm CLI binary version 3.2.0, or later, by following the instructions at Installing Helm . Ensure that your Red Hat OpenShift Container Platform CLI is configured to run oc commands. See Getting started with the OpenShift CLI in the OpenShift Container Platform documentation for more information about how to configure the oc commands. Copy the following script into a file: Replace <namespace> in the script with the name of the namespace where multicluster engine operator was installed. Ensure that you specify the correct namespace, as the namespace is cleaned out and deleted. Run the script to remove the artifacts from the installation. Run the installation. See Installing while connected online . 1.10.7. Troubleshooting an offline cluster There are a few common causes for a cluster showing an offline status. 1.10.7.1. Symptom: Cluster status is offline After you complete the procedure for creating a cluster, you cannot access it from the Red Hat Advanced Cluster Management console, and it shows a status of offline . 1.10.7.2. Resolving the problem: Cluster status is offline Determine if the managed cluster is available. You can check this in the Clusters area of the Red Hat Advanced Cluster Management console. If it is not available, try restarting the managed cluster. If the managed cluster status is still offline, complete the following steps: Run the oc get managedcluster <cluster_name> -o yaml command on the hub cluster. Replace <cluster_name> with the name of your cluster. Find the status.conditions section. Check the messages for type: ManagedClusterConditionAvailable and resolve any problems. 1.10.8. Troubleshooting a managed cluster import failure If your cluster import fails, there are a few steps that you can take to determine why the cluster import failed. 1.10.8.1. Symptom: Imported cluster not available After you complete the procedure for importing a cluster, you cannot access it from the console. 1.10.8.2. Resolving the problem: Imported cluster not available There can be a few reasons why an imported cluster is not available after an attempt to import it. If the cluster import fails, complete the following steps, until you find the reason for the failed import: On the hub cluster, run the following command to ensure that the import controller is running. You should see two pods that are running. If either of the pods is not running, run the following command to view the log to determine the reason: On the hub cluster, run the following command to determine if the managed cluster import secret was generated successfully by the import controller: If the import secret does not exist, run the following command to view the log entries for the import controller and determine why it was not created: On the hub cluster, if your managed cluster is local-cluster , provisioned by Hive, or has an auto-import secret, run the following command to check the import status of the managed cluster. If the condition ManagedClusterImportSucceeded is not true , the result of the command indicates the reason for the failure. Check the Klusterlet status of the managed cluster for a degraded condition. See Troubleshooting Klusterlet with degraded conditions to find the reason that the Klusterlet is degraded. 1.10.9. Reimporting cluster fails with unknown authority error If you experience a problem when reimporting a managed cluster to your multicluster engine operator hub cluster, follow the procedure to troubleshoot the problem. 1.10.9.1. Symptom: Reimporting cluster fails with unknown authority error After you provision an OpenShift Container Platform cluster with multicluster engine operator, reimporting the cluster might fail with a x509: certificate signed by unknown authority error when you change or add API server certificates to your OpenShift Container Platform cluster. 1.10.9.2. Identifying the problem: Reimporting cluster fails with unknown authority error After failing to reimport your managed cluster, run the following command to get the import controller log on your multicluster engine operator hub cluster: If the following error log appears, your managed cluster API server certificates might have changed: ERROR Reconciler error {"controller": "clusterdeployment-controller", "object": {"name":"awscluster1","namespace":"awscluster1"}, "namespace": "awscluster1", "name": "awscluster1", "reconcileID": "a2cccf24-2547-4e26-95fb-f258a6710d80", "error": "Get \"https://api.awscluster1.dev04.red-chesterfield.com:6443/api?timeout=32s\": x509: certificate signed by unknown authority"} To determine if your managed cluster API server certificates have changed, complete the following steps: Run the following command to specify your managed cluster name by replacing your-managed-cluster-name with the name of your managed cluster: Get your managed cluster kubeconfig secret name by running the following command: Export kubeconfig to a new file by running the following commands: Get the namespace from your managed cluster with kubeconfig by running the following command: If you receive an error that resembles the following message, your cluster API server ceritificates have been changed and your kubeconfig file is invalid. Unable to connect to the server: x509: certificate signed by unknown authority 1.10.9.3. Resolving the problem: Reimporting cluster fails with unknown authority error The managed cluster administrator must create a new valid kubeconfig file for your managed cluster. After creating a new kubeconfig , complete the following steps to update the new kubeconfig for your managed cluster: Run the following commands to set your kubeconfig file path and cluster name. Replace <path_to_kubeconfig> with the path to your new kubeconfig file. Replace <managed_cluster_name> with the name of your managed cluster: Run the following command to encode your new kubeconfig : Note: On macOS, run the following command instead: Run the following command to define the kubeconfig json patch: Retrieve your administrator kubeconfig secret name from your managed cluster by running the following command: Patch your administrator kubeconfig secret with your new kubeconfig by running the following command: 1.10.10. Troubleshooting cluster with pending import status If you receive Pending import continually on the console of your cluster, follow the procedure to troubleshoot the problem. 1.10.10.1. Symptom: Cluster with pending import status After importing a cluster by using the Red Hat Advanced Cluster Management console, the cluster appears in the console with a status of Pending import . 1.10.10.2. Identifying the problem: Cluster with pending import status Run the following command on the managed cluster to view the Kubernetes pod names that are having the issue: Run the following command on the managed cluster to find the log entry for the error: Replace registration_agent_pod with the pod name that you identified in step 1. Search the returned results for text that indicates there was a networking connectivity problem. Example includes: no such host . 1.10.10.3. Resolving the problem: Cluster with pending import status Retrieve the port number that is having the problem by entering the following command on the hub cluster: Ensure that the hostname from the managed cluster can be resolved, and that outbound connectivity to the host and port is occurring. If the communication cannot be established by the managed cluster, the cluster import is not complete. The cluster status for the managed cluster is Pending import . 1.10.11. Troubleshooting imported clusters offline after certificate change Installing a custom apiserver certificate is supported, but one or more clusters that were imported before you changed the certificate information can have an offline status. 1.10.11.1. Symptom: Clusters offline after certificate change After you complete the procedure for updating a certificate secret, one or more of your clusters that were online are now displaying an offline status in the console. 1.10.11.2. Identifying the problem: Clusters offline after certificate change After updating the information for a custom API server certificate, clusters that were imported and running before the new certificate are now in an offline state. The errors that indicate that the certificate is the problem are found in the logs for the pods in the open-cluster-management-agent namespace of the offline managed cluster. The following examples are similar to the errors that are displayed in the logs: See the following work-agent log: See the following registration-agent log: 1.10.11.3. Resolving the problem: Clusters offline after certificate change If your managed cluster is the local-cluster or your managed cluster was created by multicluster engine operator, you must wait 10 minutes or longer to recover your managed cluster. To recover your managed cluster immediately, you can delete your managed cluster import secret on the hub cluster and recover it by using multicluster engine operator. Run the following command: Replace <cluster_name> with the name of the managed cluster that you want to recover. If you want to recover a managed cluster that was imported by using multicluster engine operator, complete the following steps import the managed cluster again: On the hub cluster, recreate the managed cluster import secret by running the following command: Replace <cluster_name> with the name of the managed cluster that you want to import. On the hub cluster, expose the managed cluster import secret to a YAML file by running the following command: Replace <cluster_name> with the name of the managed cluster that you want to import. On the managed cluster, apply the import.yaml file by running the following command: Note: The steps do not detach the managed cluster from the hub cluster. The steps update the required manifests with current settings on the managed cluster, including the new certificate information. 1.10.12. Troubleshooting cluster status changing from offline to available The status of the managed cluster alternates between offline and available without any manual change to the environment or cluster. 1.10.12.1. Symptom: Cluster status changing from offline to available When the network that connects the managed cluster to the hub cluster is unstable, the status of the managed cluster that is reported by the hub cluster cycles between offline and available . 1.10.12.2. Resolving the problem: Cluster status changing from offline to available To attempt to resolve this issue, complete the following steps: Edit your ManagedCluster specification on the hub cluster by entering the following command: Replace cluster-name with the name of your managed cluster. Increase the value of leaseDurationSeconds in your ManagedCluster specification. The default value is 5 minutes, but that might not be enough time to maintain the connection with the network issues. Specify a greater amount of time for the lease. For example, you can raise the setting to 20 minutes. 1.10.13. Troubleshooting cluster creation on VMware vSphere If you experience a problem when creating a Red Hat OpenShift Container Platform cluster on VMware vSphere, see the following troubleshooting information to see if one of them addresses your problem. Note: Sometimes when the cluster creation process fails on VMware vSphere, the link is not enabled for you to view the logs. If this happens, you can identify the problem by viewing the log of the hive-controllers pod. The hive-controllers log is in the hive namespace. 1.10.13.1. Managed cluster creation fails with certificate IP SAN error 1.10.13.1.1. Symptom: Managed cluster creation fails with certificate IP SAN error After creating a new Red Hat OpenShift Container Platform cluster on VMware vSphere, the cluster fails with an error message that indicates a certificate IP SAN error. 1.10.13.1.2. Identifying the problem: Managed cluster creation fails with certificate IP SAN error The deployment of the managed cluster fails and returns the following errors in the deployment log: 1.10.13.1.3. Resolving the problem: Managed cluster creation fails with certificate IP SAN error Use the VMware vCenter server fully-qualified host name instead of the IP address in the credential. You can also update the VMware vCenter CA certificate to contain the IP SAN. 1.10.13.2. Managed cluster creation fails with unknown certificate authority 1.10.13.2.1. Symptom: Managed cluster creation fails with unknown certificate authority After creating a new Red Hat OpenShift Container Platform cluster on VMware vSphere, the cluster fails because the certificate is signed by an unknown authority. 1.10.13.2.2. Identifying the problem: Managed cluster creation fails with unknown certificate authority The deployment of the managed cluster fails and returns the following errors in the deployment log: 1.10.13.2.3. Resolving the problem: Managed cluster creation fails with unknown certificate authority Ensure you entered the correct certificate from the certificate authority when creating the credential. 1.10.13.3. Managed cluster creation fails with expired certificate 1.10.13.3.1. Symptom: Managed cluster creation fails with expired certificate After creating a new Red Hat OpenShift Container Platform cluster on VMware vSphere, the cluster fails because the certificate is expired or is not yet valid. 1.10.13.3.2. Identifying the problem: Managed cluster creation fails with expired certificate The deployment of the managed cluster fails and returns the following errors in the deployment log: 1.10.13.3.3. Resolving the problem: Managed cluster creation fails with expired certificate Ensure that the time on your ESXi hosts is synchronized. 1.10.13.4. Managed cluster creation fails with insufficient privilege for tagging 1.10.13.4.1. Symptom: Managed cluster creation fails with insufficient privilege for tagging After creating a new Red Hat OpenShift Container Platform cluster on VMware vSphere, the cluster fails because there is insufficient privilege to use tagging. 1.10.13.4.2. Identifying the problem: Managed cluster creation fails with insufficient privilege for tagging The deployment of the managed cluster fails and returns the following errors in the deployment log: 1.10.13.4.3. Resolving the problem: Managed cluster creation fails with insufficient privilege for tagging Ensure that your VMware vCenter required account privileges are correct. See Image registry removed during information for more information. 1.10.13.5. Managed cluster creation fails with invalid dnsVIP 1.10.13.5.1. Symptom: Managed cluster creation fails with invalid dnsVIP After creating a new Red Hat OpenShift Container Platform cluster on VMware vSphere, the cluster fails because there is an invalid dnsVIP. 1.10.13.5.2. Identifying the problem: Managed cluster creation fails with invalid dnsVIP If you see the following message when trying to deploy a new managed cluster with VMware vSphere, it is because you have an older OpenShift Container Platform release image that does not support VMware Installer Provisioned Infrastructure (IPI): 1.10.13.5.3. Resolving the problem: Managed cluster creation fails with invalid dnsVIP Select a release image from a later version of OpenShift Container Platform that supports VMware Installer Provisioned Infrastructure. 1.10.13.6. Managed cluster creation fails with incorrect network type 1.10.13.6.1. Symptom: Managed cluster creation fails with incorrect network type After creating a new Red Hat OpenShift Container Platform cluster on VMware vSphere, the cluster fails because there is an incorrect network type specified. 1.10.13.6.2. Identifying the problem: Managed cluster creation fails with incorrect network type If you see the following message when trying to deploy a new managed cluster with VMware vSphere, it is because you have an older OpenShift Container Platform image that does not support VMware Installer Provisioned Infrastructure (IPI): 1.10.13.6.3. Resolving the problem: Managed cluster creation fails with incorrect network type Select a valid VMware vSphere network type for the specified VMware cluster. 1.10.13.7. Managed cluster creation fails with an error processing disk changes 1.10.13.7.1. Symptom: Adding the VMware vSphere managed cluster fails due to an error processing disk changes After creating a new Red Hat OpenShift Container Platform cluster on VMware vSphere, the cluster fails because there is an error when processing disk changes. 1.10.13.7.2. Identifying the problem: Adding the VMware vSphere managed cluster fails due to an error processing disk changes A message similar to the following is displayed in the logs: 1.10.13.7.3. Resolving the problem: Adding the VMware vSphere managed cluster fails due to an error processing disk changes Use the VMware vSphere client to give the user All privileges for Profile-driven Storage Privileges . 1.10.14. Troubleshooting cluster in console with pending or failed status If you observe Pending status or Failed status in the console for a cluster you created, follow the procedure to troubleshoot the problem. 1.10.14.1. Symptom: Cluster in console with pending or failed status After creating a new cluster by using the console, the cluster does not progress beyond the status of Pending or displays Failed status. 1.10.14.2. Identifying the problem: Cluster in console with pending or failed status If the cluster displays Failed status, navigate to the details page for the cluster and follow the link to the logs provided. If no logs are found or the cluster displays Pending status, continue with the following procedure to check for logs: Procedure 1 Run the following command on the hub cluster to view the names of the Kubernetes pods that were created in the namespace for the new cluster: Replace new_cluster_name with the name of the cluster that you created. If no pod that contains the string provision in the name is listed, continue with Procedure 2. If there is a pod with provision in the title, run the following command on the hub cluster to view the logs of that pod: Replace new_cluster_name_provision_pod_name with the name of the cluster that you created, followed by the pod name that contains provision . Search for errors in the logs that might explain the cause of the problem. Procedure 2 If there is not a pod with provision in its name, the problem occurred earlier in the process. Complete the following procedure to view the logs: Run the following command on the hub cluster: Replace new_cluster_name with the name of the cluster that you created. For more information about cluster installation logs, see Gathering installation logs in the Red Hat OpenShift documentation. See if there is additional information about the problem in the Status.Conditions.Message and Status.Conditions.Reason entries of the resource. 1.10.14.3. Resolving the problem: Cluster in console with pending or failed status After you identify the errors in the logs, determine how to resolve the errors before you destroy the cluster and create it again. The following example provides a possible log error of selecting an unsupported zone, and the actions that are required to resolve it: When you created your cluster, you selected one or more zones within a region that are not supported. Complete one of the following actions when you recreate your cluster to resolve the issue: Select a different zone within the region. Omit the zone that does not provide the support, if you have other zones listed. Select a different region for your cluster. After determining the issues from the log, destroy the cluster and recreate it. See Cluster creation introduction for more information about creating a cluster. 1.10.15. Troubleshooting OpenShift Container Platform version 3.11 cluster import failure 1.10.15.1. Symptom: OpenShift Container Platform version 3.11 cluster import failure After you attempt to import a Red Hat OpenShift Container Platform version 3.11 cluster, the import fails with a log message that resembles the following content: 1.10.15.2. Identifying the problem: OpenShift Container Platform version 3.11 cluster import failure This often occurs because the installed version of the kubectl command-line tool is 1.11, or earlier. Run the following command to see which version of the kubectl command-line tool you are running: If the returned data lists version 1.11, or earlier, complete one of the fixes in Resolving the problem: OpenShift Container Platform version 3.11 cluster import failure . 1.10.15.3. Resolving the problem: OpenShift Container Platform version 3.11 cluster import failure You can resolve this issue by completing one of the following procedures: Install the latest version of the kubectl command-line tool. Download the latest version of the kubectl tool from Install and Set Up kubectl in the Kubernetes documentation. Import the cluster again after upgrading your kubectl tool. Run a file that contains the import command. Start the procedure in Importing a managed cluster with the CLI . When you create the command to import your cluster, copy that command into a YAML file named import.yaml . Run the following command to import the cluster again from the file: 1.10.16. Troubleshooting Klusterlet with degraded conditions The Klusterlet degraded conditions can help to diagnose the status of Klusterlet agents on managed cluster. If a Klusterlet is in the degraded condition, the Klusterlet agents on managed cluster might have errors that need to be troubleshooted. See the following information for Klusterlet degraded conditions that are set to True . 1.10.16.1. Symptom: Klusterlet is in the degraded condition After deploying a Klusterlet on managed cluster, the KlusterletRegistrationDegraded or KlusterletWorkDegraded condition displays a status of True . 1.10.16.2. Identifying the problem: Klusterlet is in the degraded condition Run the following command on the managed cluster to view the Klusterlet status: Check KlusterletRegistrationDegraded or KlusterletWorkDegraded to see if the condition is set to True . Proceed to Resolving the problem for any degraded conditions that are listed. 1.10.16.3. Resolving the problem: Klusterlet is in the degraded condition See the following list of degraded statuses and how you can attempt to resolve those issues: If the KlusterletRegistrationDegraded condition with a status of True and the condition reason is: BootStrapSecretMissing , you need create a bootstrap secret on open-cluster-management-agent namespace. If the KlusterletRegistrationDegraded condition displays True and the condition reason is a BootstrapSecretError , or BootstrapSecretUnauthorized , then the current bootstrap secret is invalid. Delete the current bootstrap secret and recreate a valid bootstrap secret on open-cluster-management-agent namespace. If the KlusterletRegistrationDegraded and KlusterletWorkDegraded displays True and the condition reason is HubKubeConfigSecretMissing , delete the Klusterlet and recreate it. If the KlusterletRegistrationDegraded and KlusterletWorkDegraded displays True and the condition reason is: ClusterNameMissing , KubeConfigMissing , HubConfigSecretError , or HubConfigSecretUnauthorized , delete the hub cluster kubeconfig secret from open-cluster-management-agent namespace. The registration agent will bootstrap again to get a new hub cluster kubeconfig secret. If the KlusterletRegistrationDegraded displays True and the condition reason is GetRegistrationDeploymentFailed or UnavailableRegistrationPod , you can check the condition message to get the problem details and attempt to resolve. If the KlusterletWorkDegraded displays True and the condition reason is GetWorkDeploymentFailed or UnavailableWorkPod , you can check the condition message to get the problem details and attempt to resolve. 1.10.17. Namespace remains after deleting a cluster When you remove a managed cluster, the namespace is normally removed as part of the cluster removal process. In rare cases, the namespace remains with some artifacts in it. In that case, you must manually remove the namespace. 1.10.17.1. Symptom: Namespace remains after deleting a cluster After removing a managed cluster, the namespace is not removed. 1.10.17.2. Resolving the problem: Namespace remains after deleting a cluster Complete the following steps to remove the namespace manually: Run the following command to produce a list of the resources that remain in the <cluster_name> namespace: Replace cluster_name with the name of the namespace for the cluster that you attempted to remove. Delete each identified resource on the list that does not have a status of Delete by entering the following command to edit the list: Replace resource_kind with the kind of the resource. Replace resource_name with the name of the resource. Replace namespace with the name of the namespace of the resource. Locate the finalizer attribute in the in the metadata. Delete the non-Kubernetes finalizers by using the vi editor dd command. Save the list and exit the vi editor by entering the :wq command. Delete the namespace by entering the following command: Replace cluster-name with the name of the namespace that you are trying to delete. 1.10.18. Auto-import-secret-exists error when importing a cluster Your cluster import fails with an error message that reads: auto import secret exists. 1.10.18.1. Symptom: Auto import secret exists error when importing a cluster When importing a hive cluster for management, an auto-import-secret already exists error is displayed. 1.10.18.2. Resolving the problem: Auto-import-secret-exists error when importing a cluster This problem occurs when you attempt to import a cluster that was previously managed. When this happens, the secrets conflict when you try to reimport the cluster. To work around this problem, complete the following steps: To manually delete the existing auto-import-secret , run the following command on the hub cluster: Replace cluster-namespace with the namespace of your cluster. Import your cluster again by using the procedure in Cluster import introduction . 1.10.19. Troubleshooting missing PlacementDecision after creating Placement If no PlacementDescision is generated after creating a Placement , follow the procedure to troubleshoot the problem. 1.10.19.1. Symptom: Missing PlacementDecision after creating Placement After creating a Placement , a PlacementDescision is not automatically generated. 1.10.19.2. Resolving the problem: Missing PlacementDecision after creating Placement To resolve the issue, complete the following steps: Check the Placement conditions by running the following command: Replace placement-name with the name of the Placement . The output might resemble the following example: Check the output for the Status of PlacementMisconfigured and PlacementSatisfied : If the PlacementMisconfigured Status is true, your Placement has configuration errors. Check the included message for more details on the configuration errors and how to resolve them. If the PlacementSatisfied Status is false, no managed cluster satisfies your Placement . Check the included message for more details and how to resolve the error. In the example, no ManagedClusterSetBindings were found in the placement namespace. You can check the score of each cluster in Events to find out why some clusters with lower scores are not selected. The output might resemble the following example: Note: The placement controller assigns a score and generates an event for each filtered ManagedCluster . The placement controller genereates a new event when the cluster score changes. 1.10.20. Troubleshooting a discovery failure of bare metal hosts on Dell hardware If the discovery of bare metal hosts fails on Dell hardware, the Integrated Dell Remote Access Controller (iDRAC) is likely configured to not allow certificates from unknown certificate authorities. 1.10.20.1. Symptom: Discovery failure of bare metal hosts on Dell hardware After you complete the procedure for discovering bare metal hosts by using the baseboard management controller, an error message similar to the following is displayed: 1.10.20.2. Resolving the problem: Discovery failure of bare metal hosts on Dell hardware The iDRAC is configured not to accept certificates from unknown certificate authorities. To bypass the problem, disable the certificate verification on the baseboard management controller of the host iDRAC by completing the following steps: In the iDRAC console, navigate to Configuration > Virtual media > Remote file share . Change the value of Expired or invalid certificate action to Yes . 1.10.21. Troubleshooting Minimal ISO boot failures You might encounter issues when trying to boot a minimal ISO. 1.10.21.1. Symptom: Minimal ISO boot failures The boot screen shows that the host has failed to download the root file system image. 1.10.21.2. Resolving the problem: Minimal ISO boot failures See Troubleshooting minimal ISO boot failures in the Assisted Installer for OpenShift Container Platform documentation to learn how to troubleshoot the issue. 1.10.22. Troubleshooting hosted clusters on Red Hat OpenShift Virtualization When you troubleshoot a hosted cluster on Red Hat OpenShift Virtualization, start with the top-level HostedCluster and NodePool resources and then work down the stack until you find the root cause. The following steps can help you discover the root cause of common issues. 1.10.22.1. Symptom: HostedCluster resource stuck in partial state A hosted control plane is not coming fully online because a HostedCluster resource is pending. 1.10.22.1.1. Identifying the problem: Check prerequisites, resource conditions, and node and operator status Ensure that you meet all of the prerequisites for a hosted cluster on Red Hat OpenShift Virtualization View the conditions on the HostedCluster and NodePool resources for validation errors that prevent progress. By using the kubeconfig file of the hosted cluster, inspect the status of the hosted cluster: View the output of the oc get clusteroperators command to see which cluster operators are pending. View the output of the oc get nodes command to ensure that worker nodes are ready. 1.10.22.2. Symptom: No worker nodes are registered A hosted control plane is not coming fully online because the hosted control plane has no worker nodes registered. 1.10.22.2.1. Identifying the problem: Check the status of various parts of the hosted control plane View the HostedCluster and NodePool conditions for failures that indicate what the problem might be. Enter the following command to view the KubeVirt worker node virtual machine (VM) status for the NodePool resource: If the VMs are stuck in the provisioning state, enter the following command to view the CDI import pods within the VM namespace for clues about why the importer pods have not completed: If the VMs are stuck in the starting state, enter the following command to view the status of the virt-launcher pods: If the virt-launcher pods are in a pending state, investigate why the pods are not being scheduled. For example, not enough resources might exist to run the virt-launcher pods. If the VMs are running but they are not registered as worker nodes, use the web console to gain VNC access to one of the affected VMs. The VNC output indicates whether the ignition configuration was applied. If a VM cannot access the hosted control plane ignition server on startup, the VM cannot be provisioned correctly. If the ignition configuration was applied but the VM is still not registering as a node, see Identifying the problem: Access the VM console logs to learn how to access the VM console logs during startup. 1.10.22.3. Symptom: Worker nodes are stuck in the NotReady state During cluster creation, nodes enter the NotReady state temporarily while the networking stack is rolled out. This part of the process is normal. However, if this part of the process takes longer than 15 minutes, an issue might have occurred. 1.10.22.3.1. Identifying the problem: Investigate the node object and pods Enter the following command to view the conditions on the node object and determine why the node is not ready: Enter the following command to look for failing pods within the cluster: 1.10.22.4. Symptom: Ingress and console cluster operators are not coming online A hosted control plane is not coming fully online because the Ingress and console cluster operators are not online. 1.10.22.4.1. Identifying the problem: Check wildcard DNS routes and load balancer If the cluster uses the default Ingress behavior, enter the following command to ensure that wildcard DNS routes are enabled on the OpenShift Container Platform cluster that the virtual machines (VMs) are hosted on: If you use a custom base domain for the hosted control plane, complete the following steps: Ensure that the load balancer is targeting the VM pods correctly. Ensure that the wildcard DNS entry is targeting the load balancer IP. 1.10.22.5. Symptom: Load balancer services for the hosted cluster are not available A hosted control plane is not coming fully online because the load balancer services are not becoming available. 1.10.22.5.1. Identifying the problem: Check events, details, and the kccm pod Look for events and details that are associated with the load balancer service within the hosted cluster. By default, load balancers for the hosted cluster are handled by the kubevirt-cloud-controller-manager within the hosted control plane namespace. Ensure that the kccm pod is online and view its logs for errors or warnings. To identify the kccm pod in the hosted control plane namespace, enter the following command: 1.10.22.6. Symptom: Hosted cluster PVCs are not available A hosted control plane is not coming fully online because the persistent volume claims (PVCs) for a hosted cluster are not available. 1.10.22.6.1. Identifying the problem: Check PVC events and details, and component logs Look for events and details that are associated with the PVC to understand which errors are occurring. If a PVC is failing to attach to a pod, view the logs for the kubevirt-csi-node daemonset component within the hosted cluster to further investigate the problem. To identify the kubevirt-csi-node pods for each node, enter the following command: If a PVC cannot bind to a persistent volume (PV), view the logs of the kubevirt-csi-controller component within the hosted control plane namespace. To identify the kubevirt-csi-controller pod within the hosted control plane namespace, enter the following command: 1.10.22.7. Symptom: VM nodes are not correctly joining the cluster A hosted control plane is not coming fully online because the VM nodes are not correctly joining the cluster. 1.10.22.7.1. Identifying the problem: Access the VM console logs To access the VM console logs, complete the steps in How to get serial console logs for VMs part of OpenShift Virtualization Hosted Control Plane clusters . 1.10.22.8. Troubleshooting the RHCOS image mirroring For hosted control planes on Red Hat OpenShift Virtualization in a disconnected environment, oc-mirror fails to automatically mirror the Red Hat Enterprise Linux CoreOS (RHCOS) image to the internal registry. When you create your first hosted cluster, the Kubevirt virtual machine does not boot, because the boot image is not availble in the internal registry. 1.10.22.8.1. Symptom: oc-mirror fails to attempt the RHCOS image mirroring The oc-mirror plugin does not mirror the {op-system-first} image from the release payload to the internal registry. 1.10.22.8.2. Resolving the problem: oc-mirror fails to attempt the RHCOS image mirroring To resolve this issue, manually mirror the RHCOS image to the internal registry. Complete the following steps: Get the internal registry name by running the following command: oc get imagecontentsourcepolicy -o json | jq -r '.items[].spec.repositoryDigestMirrors[0].mirrors[0]' Get a payload image by running the following command: oc get clusterversion version -ojsonpath='{.status.desired.image}' Extract the 0000_50_installer_coreos-bootimages.yaml file that contains boot images from your payload image on the hosted cluster. Replace <payload_image> with the name of your payload image. Run the following command: oc image extract --file /release-manifests/0000_50_installer_coreos-bootimages.yaml <payload_image> --confirm Get the RHCOS image by running the following command: cat 0000_50_installer_coreos-bootimages.yaml | yq -r .data.stream | jq -r '.architectures.x86_64.images.kubevirt."digest-ref"' Mirror the RHCOS image to your internal registry. Replace <rhcos_image> with your RHCOS image for example, quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d9643ead36b1c026be664c9c65c11433c6cdf71bfd93ba229141d134a4a6dd94 . Replace <internal_registry> with the name of your internal registry, for example, virthost.ostest.test.metalkube.org:5000/localimages/ocp-v4.0-art-dev . Run the following command: oc image mirror <rhcos_image> <internal_registry> Create a YAML file named rhcos-boot-kubevirt.yaml that defines the ImageDigestMirrorSet object. See the following example configuration: apiVersion: config.openshift.io/v1 kind: ImageDigestMirrorSet metadata: name: rhcos-boot-kubevirt spec: repositoryDigestMirrors: - mirrors: - <rhcos_image_no_digest> 1 source: virthost.ostest.test.metalkube.org:5000/localimages/ocp-v4.0-art-dev 2 1 Specify your RHCOS image without its digest, for example, quay.io/openshift-release-dev/ocp-v4.0-art-dev . 2 Specify the name of your internal registry, for example, virthost.ostest.test.metalkube.org:5000/localimages/ocp-v4.0-art-dev . Apply the rhcos-boot-kubevirt.yaml file to create the ImageDigestMirrorSet object by running the following command: oc apply -f rhcos-boot-kubevirt.yaml 1.10.22.9. Troubleshooting: Returning non bare metal clusters to the late binding pool If you are using late binding managed clusters without BareMetalHosts , you must complete additional manual steps to destroy a late binding cluster and return the nodes back to the Discovery ISO. 1.10.22.9.1. Symptom: Returning non bare metal clusters to the late binding pool For late binding managed clusters without BareMetalHosts , removing cluster information does not automatically return all nodes to the Discovery ISO. 1.10.22.9.2. Resolving the problem: Returning non bare metal clusters to the late binding pool To unbind the non bare metal nodes with late binding, complete the following steps: Remove the cluster information. See Removing a cluster from management to learn more. Clean the root disks. Reboot manually with the Discovery ISO. 1.10.22.10. Troubleshooting installation status stuck at Installing on a ROSA with hosted control planes cluster When you install multicluster engine for Kubernetes operator on a Red Hat OpenShift Service on AWS (ROSA) with hosted control planes cluster, multicluster engine operator becomes stuck at the Installing step and the local-cluster remains in the Unknown state. 1.10.22.10.1. Symptom: Installation status stuck at Installing on a ROSA with hosted control planes cluster The local-cluster remains in the Unknown state 10 minutes after installing multicluster engine operator. When you check the klusterlet-agent pod log in the open-cluster-management-agent namespace on your hub cluster, you see the following error message: E0809 18:45:29.450874 1 reflector.go:147] k8s.io/[email protected]/tools/cache/reflector.go:229: Failed to watch *v1.CertificateSigningRequest: failed to list *v1.CertificateSigningRequest: Get "https://api.xxx.openshiftapps.com:443/apis/certificates.k8s.io/v1/certificatesigningrequests?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate signed by unknown authority 1.10.22.10.2. Resolving the problem: Installation status stuck at Installing on a ROSA with hosted control planes cluster To resolve the problem, add the ISRG Root X1 certificate to the klusterlet CA bundle by completing the following steps: Download and encode the root CA certificate by running the following command: curl -s https://letsencrypt.org/certs/isrgrootx1.pem | base64 | tr -d "\n" Create a global KlusterletConfig resource and add your encoded CA certificate to the spec parameter section. See the following example: apiVersion: config.open-cluster-management.io/v1alpha1 kind: KlusterletConfig metadata: name: global spec: hubKubeAPIServerCABundle: "<your_ca_certificate>" Apply the resource by running the following command on the hub cluster: oc apply -f <filename> If the local-cluster state does not recover in 1 minute, export and decode the import.yaml file by running the following command on the hub cluster: oc get secret local-cluster-import -n local-cluster -o jsonpath={.data.import/.yaml} | base64 --decode > import.yaml Apply the file by running the following command on the hub cluster: oc apply -f import.yaml 1.10.22.11. Troubleshooting all managed clusters become Unknown on a ROSA with hosted control planes cluster On a Red Hat OpenShift Service on AWS (ROSA) with hosted control planes cluster, the state of all managed clusters might switch to Unknown . 1.10.22.11.1. Symptom: All managed clusters become Unknown on a ROSA with hosted control planes cluster The state of all managed cluster on a ROSA hosted cluster suddenly becomes Unknown . When you check the klusterlet-agent pod log in the open-cluster-management-agent namespace on your managed cluster, you see the following error message: E0809 18:45:29.450874 1 reflector.go:147] k8s.io/[email protected]/tools/cache/reflector.go:229: Failed to watch *v1.CertificateSigningRequest: failed to list *v1.CertificateSigningRequest: Get "https://api.xxx.openshiftapps.com:443/apis/certificates.k8s.io/v1/certificatesigningrequests?limit=500&resourceVersion=0": tls: failed to verify certificate: x509: certificate signed by unknown authority 1.10.22.11.2. Resolving the problem: All managed clusters become Unknown on a ROSA with hosted control planes cluster To resolve the problem, add the ISRG Root X1 certificate to the klusterlet CA bundle by completing the following steps: Download and encode the root CA certificate by running the following command: curl -s https://letsencrypt.org/certs/isrgrootx1.pem | base64 | tr -d "\n" Create a KlusterletConfig resource and add your encoded CA certificate to the spec parameter section. See the following example: apiVersion: config.open-cluster-management.io/v1alpha1 kind: KlusterletConfig metadata: name: global spec: hubKubeAPIServerCABundle: "<your_ca_certificate>" Apply the resource by running the following command on the hub cluster: oc apply -f <filename> The state of some managed clusters might recover. For managed clusters that remain Unknown , export and decode the import.yaml file from the hub cluster by running the following command on the hub cluster. Replace values where needed: oc get secret <cluster_name>-import -n <cluster_name> -o jsonpath={.data.import/.yaml} | base64 --decode > <cluster_name>-import.yaml Apply the file by running the following command on the managed cluster. Replace values where needed: oc apply -f <cluster_name>-import.yaml 1.10.22.12. Troubleshooting an attempt to upgrade managed cluster with missing OpenShift Container Platform version You do not see the OpenShift Container Platform version that you want in the console when you attempt to upgrade your managed cluster in the console. 1.10.22.12.1. Symptom: Attempt to upgrade managed cluster with missing OpenShift Container Platform version When you attempt to upgrade a managed cluster from the console and click Upgrade available in the Cluster details view to choose the OpenShift Container Platform version from the dropdown list, the version is missing. 1.10.22.12.2. Resolving the problem: Attempt to upgrade managed cluster with missing OpenShift Container Platform version See the following procedure: Ensure the version you want is included in the status of the ClusterVersion resource on the managed cluster. Run the following command: oc get clusterversion version -o jsonpath='{.status.availableUpdates[*].version}' If your expected version is not displayed, then the version is not applicable for this managed cluster. Check if the ManagedClusterInfo resource includes the version on the hub cluster. Run the following command: oc -n <cluster_name> get managedclusterinfo <cluster_name> -o jsonpath='{.status.distributionInfo.ocp.availableUpdates[*]}' If the version is included, check to see if there is a ClusterCurator resource with a failure on the hub cluster. Run the following command: oc -n <cluster_name> get ClusterCurator <cluster_name> -o yaml If the ClusterCurator resource exists and the status of its clustercurator-job condition is False , delete the ClusterCurator resource from the hub cluster. Run the following command: oc -n <cluster_name> delete ClusterCurator <cluster_name> If the ManagedClusterInfo resource does not include the version, check the work-manager add-on log on the managed cluster and fix errors that are reported. Run the following command and replace the pod name with the real name in your environment: oc -n open-cluster-management-agent-addon logs klusterlet-addon-workmgr-<your_pod_name> 1.10.23. Troubleshooting hosted clusters on bare metal When you troubleshoot a hosted cluster on bare metal, take the following steps to discover the root cause of common issues. 1.10.23.1. Symptom: Nodes fail to be added to hosted control planes on bare metal When you scale up a hosted control planes cluster with nodes that were provisioned by using Assisted Installer, the host fails to pull the ignition with a URL that contains port 22642. That URL is invalid for hosted control planes and indicates that an issue exists with the cluster. 1.10.23.2. Identifying the problem: Nodes fail to be added to hosted control planes on bare metal To determine the issue, review the assisted-service logs: oc logs -n multicluster-engine <assisted_service_pod_name> 1 1 Specify the Assisted Service pod name. In the logs, find errors that resemble these examples: error="failed to get pull secret for update: invalid pull secret data in secret pull-secret" pull secret must contain auth for \"registry.redhat.io\" 1.10.23.3. Resolving the problem: Nodes fail to be added to hosted control planes on bare metal To fix this issue, follow the instructions in Add the pull secret to the namespace . | [
"delete csv -n openshift-operators volsync-product.v0.6.0",
"E0203 07:10:38.266841 1 reflector.go:138] sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go:224: Failed to watch *v1.ClusterClaim: failed to list *v1.ClusterClaim: v1.ClusterClaimList.Items: []v1.ClusterClaim: v1.ClusterClaim.v1.ClusterClaim.Spec: v1.ClusterClaimSpec.Lifetime: unmarshalerDecoder: time: unknown unit \"w\" in duration \"1w\", error found in #10 byte of ...|time\":\"1w\"}},{\"apiVe|..., bigger context ...|clusterPoolName\":\"policy-aas-hubs\",\"lifetime\":\"1w\"}},{\"apiVersion\":\"hive.openshift.io/v1\",\"kind\":\"Cl|",
"delete deployment multiclusterhub-repo -n <namespace>",
"get bmh -n <cluster_provisioning_namespace>",
"describe bmh -n <cluster_provisioning_namespace> <bmh_name>",
"Status: Error Count: 1 Error Message: Image provisioning failed: ... [Errno 36] File name too long",
"edit mch -n open-cluster-management multiclusterhub",
"delete managedcluster local-cluster",
"edit mch -n open-cluster-management multiclusterhub",
"edit clusterdeployment/<mycluster> -n <namespace>",
"delete ns <namespace>",
"compute: - hyperthreading: Enabled name: 'worker' replicas: 3 platform: azure: type: Standard_D2s_v3 osDisk: diskSizeGB: 128 vmNetworkingType: 'Basic'",
"status: agentLabelSelector: matchLabels: infraenvs.agent-install.openshift.io: qe2 bootArtifacts: initrd: https://assisted-image-service-multicluster-engine.redhat.com/images/0000/pxe-initrd?api_key=0000000&arch=x86_64&version=4.11 ipxeScript: https://assisted-service-multicluster-engine.redhat.com/api/assisted-install/v2/infra-envs/00000/downloads/files?api_key=000000000&file_name=ipxe-script kernel: https://mirror.openshift.com/pub/openshift-v4/x86_64/dependencies/rhcos/4.12/latest/rhcos-live-kernel-x86_64 rootfs: https://mirror.openshift.com/pub/openshift-v4/x86_64/dependencies/rhcos/4.12/latest/rhcos-live-rootfs.x86_64.img",
"for artifact in oc get infraenv qe2 -ojsonpath=\"{.status.bootArtifacts}\" | jq \". | keys[]\" | sed \"s/\\\"//g\" do curl -k oc get infraenv qe2 -ojsonpath=\"{.status.bootArtifacts.USD{artifact}}\"` -o USDartifact",
"patch clusterdeployment <clusterdeployment-name> -p '{\"metadata\":{\"finalizers\":null}}' --type=merge",
"apiVersion: agent-install.openshift.io/v1beta1 kind: InfraEnv metadata: name: cluster namespace: cluster spec: ignitionConfigOverride: '{\"ignition\":{\"version\":\"3.2.0\"},\"storage\":{\"files\":[{\"path\":\"/etc/containers/policy.json\",\"mode\":420,\"overwrite\":true,\"contents\":{\"source\":\"data:text/plain;charset=utf-8;base64,ewogICAgImRlZmF1bHQiOiBbCiAgICAgICAgewogICAgICAgICAgICAidHlwZSI6ICJpbnNlY3VyZUFjY2VwdEFueXRoaW5nIgogICAgICAgIH0KICAgIF0sCiAgICAidHJhbnNwb3J0cyI6CiAgICAgICAgewogICAgICAgICAgICAiZG9ja2VyLWRhZW1vbiI6CiAgICAgICAgICAgICAgICB7CiAgICAgICAgICAgICAgICAgICAgIiI6IFt7InR5cGUiOiJpbnNlY3VyZUFjY2VwdEFueXRoaW5nIn1dCiAgICAgICAgICAgICAgICB9CiAgICAgICAgfQp9\"}}]}}'",
"{ \"default\": [ { \"type\": \"insecureAcceptAnything\" } ], \"transports\": { \"docker-daemon\": { \"\": [ { \"type\": \"insecureAcceptAnything\" } ] } } }",
"apiVersion: v1 kind: ConfigMap metadata: name: my-assisted-service-config namespace: multicluster-engine data: ALLOW_CONVERGED_FLOW: \"false\" 1",
"annotate --overwrite AgentServiceConfig agent unsupported.agent-install.openshift.io/assisted-service-configmap=my-assisted-service-config",
"Error querying resource logs: Service unavailable",
"apiVersion: agent-install.openshift.io/v1beta1 kind: InfraEnv spec: osImageVersion: 4.15",
"get nodepool -A",
"get nodes --kubeconfig",
"get agents -A",
"create clusterrolebinding <role-binding-name> --clusterrole=open-cluster-management:cluster-manager-admin --user=<username>",
"create clusterrolebinding (role-binding-name) --clusterrole=open-cluster-management:admin:<cluster-name> --user=<username>",
"create rolebinding <role-binding-name> -n <cluster-name> --clusterrole=admin --user=<username>",
"create clusterrolebinding <role-binding-name> --clusterrole=open-cluster-management:view:<cluster-name> --user=<username>",
"create rolebinding <role-binding-name> -n <cluster-name> --clusterrole=view --user=<username>",
"get managedclusters.clusterview.open-cluster-management.io",
"get managedclustersets.clusterview.open-cluster-management.io",
"adm policy add-cluster-role-to-group open-cluster-management:clusterset-admin:server-foundation-clusterset server-foundation-team-admin",
"adm policy add-cluster-role-to-group open-cluster-management:clusterset-view:server-foundation-clusterset server-foundation-team-user",
"adm new-project server-foundation-clusterpool adm policy add-role-to-group admin server-foundation-team-admin --namespace server-foundation-clusterpool",
"-n openshift-console get route console",
"console console-openshift-console.apps.new-coral.purple-chesterfield.com console https reencrypt/Redirect None",
"apiVersion: multicluster.openshift.io/v1 kind: MultiClusterEngine metadata: name: multiclusterengine spec: {}",
"create namespace <namespace>",
"project <namespace>",
"apiVersion: operators.coreos.com/v1 kind: OperatorGroup metadata: name: <default> namespace: <namespace> spec: targetNamespaces: - <namespace>",
"apply -f <path-to-file>/<operator-group>.yaml",
"apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: name: multicluster-engine spec: sourceNamespace: openshift-marketplace source: redhat-operators channel: stable-2.6 installPlanApproval: Automatic name: multicluster-engine",
"apply -f <path-to-file>/<subscription>.yaml",
"apiVersion: multicluster.openshift.io/v1 kind: MultiClusterEngine metadata: name: multiclusterengine spec: {}",
"apply -f <path-to-file>/<custom-resource>.yaml",
"error: unable to recognize \"./mce.yaml\": no matches for kind \"MultiClusterEngine\" in version \"operator.multicluster-engine.io/v1\"",
"get mce -o=jsonpath='{.items[0].status.phase}'",
"metadata: labels: node-role.kubernetes.io/infra: \"\" spec: taints: - effect: NoSchedule key: node-role.kubernetes.io/infra",
"spec: config: nodeSelector: node-role.kubernetes.io/infra: \"\" tolerations: - key: node-role.kubernetes.io/infra effect: NoSchedule operator: Exists",
"spec: nodeSelector: node-role.kubernetes.io/infra: \"\"",
"-n openshift-console get route console",
"console console-openshift-console.apps.new-name.purple-name.com console https reencrypt/Redirect None",
"additionalTrustBundle: | -----BEGIN CERTIFICATE----- certificate_content -----END CERTIFICATE----- sshKey: >-",
"- mirrors: - <your_registry>/rhacm2 source: registry.redhat.io/rhacm2 - mirrors: - <your_registry>/quay source: registry.redhat.io/quay - mirrors: - <your_registry>/compliance source: registry.redhat.io/compliance",
"apiVersion: operator.openshift.io/v1alpha1 kind: ImageContentSourcePolicy metadata: name: mce-repo spec: repositoryDigestMirrors: - mirrors: - mirror.registry.com:5000/multicluster-engine source: registry.redhat.io/multicluster-engine",
"apply -f mce-policy.yaml",
"UPSTREAM_REGISTRY=quay.io PRODUCT_REPO=openshift-release-dev RELEASE_NAME=ocp-release OCP_RELEASE=4.15.2-x86_64 LOCAL_REGISTRY=USD(hostname):5000 LOCAL_SECRET_JSON=<pull-secret> adm -a USD{LOCAL_SECRET_JSON} release mirror --from=USD{UPSTREAM_REGISTRY}/USD{PRODUCT_REPO}/USD{RELEASE_NAME}:USD{OCP_RELEASE} --to=USD{LOCAL_REGISTRY}/ocp4 --to-release-image=USD{LOCAL_REGISTRY}/ocp4/release:USD{OCP_RELEASE}",
"git clone https://github.com/openshift/cincinnati-graph-data",
"build -f <docker-path> -t <graph-path>:latest",
"push <graph-path>:latest --authfile=<pull-secret>.json",
"apiVersion: v1 kind: ConfigMap metadata: name: trusted-ca data: updateservice-registry: | -----BEGIN CERTIFICATE----- -----END CERTIFICATE-----",
"patch image.config.openshift.io cluster -p '{\"spec\":{\"additionalTrustedCA\":{\"name\":\"<trusted_ca>\"}}}' --type merge",
"apiVersion: update-service.openshift.io/v1beta2 kind: update-service metadata: name: openshift-cincinnati-instance namespace: openshift-update-service spec: registry: <registry-host-name>:<port> 1 replicas: 1 repository: USD{LOCAL_REGISTRY}/ocp4/release graphDataImage: '<host-name>:<port>/cincinnati-graph-data-container' 2",
"apiVersion: policy.open-cluster-management.io/v1 kind: Policy metadata: name: policy-mirror namespace: default spec: disabled: false remediationAction: enforce policy-templates: - objectDefinition: apiVersion: policy.open-cluster-management.io/v1 kind: ConfigurationPolicy metadata: name: policy-image-content-source-policy spec: object-templates: - complianceType: musthave objectDefinition: apiVersion: operator.openshift.io/v1alpha1 kind: ImageContentSourcePolicy metadata: name: <your-local-mirror-name> 1 spec: repositoryDigestMirrors: - mirrors: - <your-registry> 2 source: registry.redhat.io --- apiVersion: policy.open-cluster-management.io/v1 kind: PlacementBinding metadata: name: binding-policy-mirror namespace: default placementRef: name: placement-policy-mirror kind: PlacementRule apiGroup: apps.open-cluster-management.io subjects: - name: policy-mirror kind: Policy apiGroup: policy.open-cluster-management.io --- apiVersion: apps.open-cluster-management.io/v1 kind: PlacementRule metadata: name: placement-policy-mirror namespace: default spec: clusterConditions: - status: \"True\" type: ManagedClusterConditionAvailable clusterSelector: matchExpressions: [] 3",
"apiVersion: policy.open-cluster-management.io/v1 kind: Policy metadata: name: policy-catalog namespace: default spec: disabled: false remediationAction: enforce policy-templates: - objectDefinition: apiVersion: policy.open-cluster-management.io/v1 kind: ConfigurationPolicy metadata: name: policy-catalog spec: object-templates: - complianceType: musthave objectDefinition: apiVersion: config.openshift.io/v1 kind: OperatorHub metadata: name: cluster spec: disableAllDefaultSources: true - complianceType: musthave objectDefinition: apiVersion: operators.coreos.com/v1alpha1 kind: CatalogSource metadata: name: my-operator-catalog namespace: openshift-marketplace spec: sourceType: grpc image: '<registry_host_name>:<port>/olm/redhat-operators:v1' 1 displayName: My Operator Catalog publisher: grpc --- apiVersion: policy.open-cluster-management.io/v1 kind: PlacementBinding metadata: name: binding-policy-catalog namespace: default placementRef: name: placement-policy-catalog kind: PlacementRule apiGroup: apps.open-cluster-management.io subjects: - name: policy-catalog kind: Policy apiGroup: policy.open-cluster-management.io --- apiVersion: apps.open-cluster-management.io/v1 kind: PlacementRule metadata: name: placement-policy-catalog namespace: default spec: clusterConditions: - status: \"True\" type: ManagedClusterConditionAvailable clusterSelector: matchExpressions: [] 2",
"get clusterversion -o yaml",
"get routes",
"apiVersion: policy.open-cluster-management.io/v1 kind: Policy metadata: name: policy-cluster-version namespace: default annotations: policy.open-cluster-management.io/standards: null policy.open-cluster-management.io/categories: null policy.open-cluster-management.io/controls: null spec: disabled: false remediationAction: enforce policy-templates: - objectDefinition: apiVersion: policy.open-cluster-management.io/v1 kind: ConfigurationPolicy metadata: name: policy-cluster-version spec: object-templates: - complianceType: musthave objectDefinition: apiVersion: config.openshift.io/v1 kind: ClusterVersion metadata: name: version spec: channel: stable-4.4 upstream: >- https://example-cincinnati-policy-engine-uri/api/upgrades_info/v1/graph 1 --- apiVersion: policy.open-cluster-management.io/v1 kind: PlacementBinding metadata: name: binding-policy-cluster-version namespace: default placementRef: name: placement-policy-cluster-version kind: PlacementRule apiGroup: apps.open-cluster-management.io subjects: - name: policy-cluster-version kind: Policy apiGroup: policy.open-cluster-management.io --- apiVersion: apps.open-cluster-management.io/v1 kind: PlacementRule metadata: name: placement-policy-cluster-version namespace: default spec: clusterConditions: - status: \"True\" type: ManagedClusterConditionAvailable clusterSelector: matchExpressions: [] 2",
"get clusterversion -o yaml",
"apiVersion: v1 items: - apiVersion: config.openshift.io/v1 kind: ClusterVersion [..] spec: channel: stable-4.4 upstream: https://<hub-cincinnati-uri>/api/upgrades_info/v1/graph",
"apiVersion: operator.open-cluster-management.io/v1 kind: MultiClusterEngine metadata: name: multiclusterengine spec: overrides: components: - name: <name> 1 enabled: true",
"patch MultiClusterEngine <multiclusterengine-name> --type=json -p='[{\"op\": \"add\", \"path\": \"/spec/overrides/components/-\",\"value\":{\"name\":\"<name>\",\"enabled\":true}}]'",
"apiVersion: multicluster.openshift.io/v1 kind: MultiClusterEngine metadata: name: multiclusterengine spec: overrides: components: - name: local-cluster enabled: false",
"create secret generic <secret> -n <namespace> --from-file=.dockerconfigjson=<path-to-pull-secret> --type=kubernetes.io/dockerconfigjson",
"apiVersion: multicluster.openshift.io/v1 kind: MultiClusterEngine metadata: name: multiclusterengine spec: imagePullSecret: <secret>",
"apiVersion: multicluster.openshift.io/v1 kind: MultiClusterEngine metadata: name: multiclusterengine spec: targetNamespace: <target>",
"apiVersion: multicluster.openshift.io/v1 kind: MultiClusterEngine metadata: name: multiclusterengine spec: availabilityConfig: \"Basic\"",
"spec: nodeSelector: node-role.kubernetes.io/infra: \"\"",
"spec: tolerations: - key: node-role.kubernetes.io/infra effect: NoSchedule operator: Exists",
"apiVersion: multicluster.openshift.io/v1 kind: MultiClusterEngine metadata: name: multiclusterengine spec: overrides: components: - name: managedserviceaccount enabled: true",
"patch MultiClusterEngine <multiclusterengine-name> --type=json -p='[{\"op\": \"add\", \"path\": \"/spec/overrides/components/-\",\"value\":{\"name\":\"managedserviceaccount\",\"enabled\":true}}]'",
"Cannot delete MultiClusterEngine resource because ManagedCluster resource(s) exist",
"project <namespace>",
"delete multiclusterengine --all",
"get multiclusterengine -o yaml",
"❯ oc get csv NAME DISPLAY VERSION REPLACES PHASE multicluster-engine.v2.0.0 multicluster engine for Kubernetes 2.0.0 Succeeded ❯ oc delete clusterserviceversion multicluster-engine.v2.0.0 ❯ oc delete sub multicluster-engine",
"#!/bin/bash delete apiservice v1.admission.cluster.open-cluster-management.io v1.admission.work.open-cluster-management.io delete validatingwebhookconfiguration multiclusterengines.multicluster.openshift.io delete mce --all",
"curl -L https://raw.githubusercontent.com/open-cluster-management-io/clusteradm/main/install.sh | bash",
"apiVersion: addon.open-cluster-management.io/v1alpha1 kind: addonDeploymentConfig metadata: name: addon-ns-config namespace: multicluster-engine spec: agentInstallNamespace: open-cluster-management-agent-addon-discovery",
"apiVersion: addon.open-cluster-management.io/v1alpha1 kind: ClusterManagementAddOn metadata: name: work-manager spec: addonMeta: displayName: work-manager installStrategy: placements: - name: global namespace: open-cluster-management-global-set rolloutStrategy: type: All type: Placements",
"apiVersion: addon.open-cluster-management.io/v1alpha1 kind: ClusterManagementAddOn metadata: name: work-manager spec: addonMeta: displayName: work-manager installStrategy: placements: - name: global namespace: open-cluster-management-global-set rolloutStrategy: type: All configs: - group: addon.open-cluster-management.io name: addon-ns-config namespace: multicluster-engine resource: addondeploymentconfigs type: Placements",
"apiVersion: addon.open-cluster-management.io/v1alpha1 kind: ClusterManagementAddOn metadata: name: managed-serviceaccount spec: addonMeta: displayName: managed-serviceaccount installStrategy: placements: - name: global namespace: open-cluster-management-global-set rolloutStrategy: type: All configs: - group: addon.open-cluster-management.io name: addon-ns-config namespace: multicluster-engine resource: addondeploymentconfigs type: Placements",
"apiVersion: addon.open-cluster-management.io/v1alpha1 kind: ClusterManagementAddOn metadata: name: cluster-proxy spec: addonMeta: displayName: cluster-proxy installStrategy: placements: - name: global namespace: open-cluster-management-global-set rolloutStrategy: type: All configs: - group: addon.open-cluster-management.io name: addon-ns-config namespace: multicluster-engine resource: addondeploymentconfigs type: Placements",
"get deployment -n open-cluster-management-agent-addon-discovery",
"NAME READY UP-TO-DATE AVAILABLE AGE cluster-proxy-proxy-agent 1/1 1 1 24h klusterlet-addon-workmgr 1/1 1 1 24h managed-serviceaccount-addon-agent 1/1 1 1 24h",
"kind: KlusterletConfig apiVersion: config.open-cluster-management.io/v1alpha1 metadata: name: mce-import-klusterlet-config spec: installMode: type: noOperator noOperator: postfix: mce-import",
"label addondeploymentconfig addon-ns-config -n multicluster-engine cluster.open-cluster-management.io/backup=true",
"label addondeploymentconfig hypershift-addon-deploy-config -n multicluster-engine cluster.open-cluster-management.io/backup=true",
"label clustermanagementaddon work-manager cluster.open-cluster-management.io/backup=true",
"label clustermanagementaddon cluster-proxy cluster.open-cluster-management.io/backup=true",
"label clustermanagementaddon managed-serviceaccount cluster.open-cluster-management.io/backup=true",
"label KlusterletConfig mce-import-klusterlet-config cluster.open-cluster-management.io/backup=true",
"apiVersion: cluster.open-cluster-management.io/v1 kind: ManagedCluster metadata: annotations: agent.open-cluster-management.io/klusterlet-config: mce-import-klusterlet-config 1 labels: cloud: auto-detect vendor: auto-detect name: mce-a 2 spec: hubAcceptsClient: true leaseDurationSeconds: 60",
"get managedcluster",
"NAME HUB ACCEPTED MANAGED CLUSTER URLS JOINED AVAILABLE AGE local-cluster true https://<api.acm-hub.com:port> True True 44h mce-a true https://<api.mce-a.com:port> True True 27s",
"patch addondeploymentconfig hypershift-addon-deploy-config -n multicluster-engine --type=merge -p '{\"spec\":{\"agentInstallNamespace\":\"open-cluster-management-agent-addon-discovery\"}}'",
"patch addondeploymentconfig hypershift-addon-deploy-config -n multicluster-engine --type=merge -p '{\"spec\":{\"customizedVariables\":[{\"name\":\"disableMetrics\",\"value\": \"true\"},{\"name\":\"disableHOManagement\",\"value\": \"true\"}]}}'",
"clusteradm addon enable --names hypershift-addon --clusters <managed-cluster-names>",
"get managedcluster",
"get deployment -n open-cluster-management-agent-addon-discovery",
"NAME READY UP-TO-DATE AVAILABLE AGE cluster-proxy-proxy-agent 1/1 1 1 24h klusterlet-addon-workmgr 1/1 1 1 24h hypershift-addon-agent 1/1 1 1 24h managed-serviceaccount-addon-agent 1/1 1 1 24h",
"apiVersion: discovery.open-cluster-management.io/v1 kind: DiscoveredCluster metadata: creationTimestamp: \"2024-05-30T23:05:39Z\" generation: 1 labels: hypershift.open-cluster-management.io/hc-name: hosted-cluster-1 hypershift.open-cluster-management.io/hc-namespace: clusters name: hosted-cluster-1 namespace: mce-1 resourceVersion: \"1740725\" uid: b4c36dca-a0c4-49f9-9673-f561e601d837 spec: apiUrl: https://a43e6fe6dcef244f8b72c30426fb6ae3-ea3fec7b113c88da.elb.us-west-1.amazonaws.com:6443 cloudProvider: aws creationTimestamp: \"2024-05-30T23:02:45Z\" credential: {} displayName: mce-1-hosted-cluster-1 importAsManagedCluster: false isManagedCluster: false name: hosted-cluster-1 openshiftVersion: 0.0.0 status: Active type: MultiClusterEngineHCP",
"apiVersion: policy.open-cluster-management.io/v1 kind: Policy metadata: name: policy-mce-hcp-autoimport namespace: open-cluster-management-global-set annotations: policy.open-cluster-management.io/standards: NIST SP 800-53 policy.open-cluster-management.io/categories: CM Configuration Management policy.open-cluster-management.io/controls: CM-2 Baseline Configuration policy.open-cluster-management.io/description: Discovered clusters that are of type MultiClusterEngineHCP can be automatically imported into ACM as managed clusters. This policy configure those discovered clusters so they are automatically imported. Fine tuning MultiClusterEngineHCP clusters to be automatically imported can be done by configure filters at the configMap or add annotation to the discoverd cluster. spec: disabled: false policy-templates: - objectDefinition: apiVersion: policy.open-cluster-management.io/v1 kind: ConfigurationPolicy metadata: name: mce-hcp-autoimport-config spec: object-templates: - complianceType: musthave objectDefinition: apiVersion: v1 kind: ConfigMap metadata: name: discovery-config namespace: open-cluster-management-global-set data: rosa-filter: \"\" remediationAction: enforce 1 severity: low - objectDefinition: apiVersion: policy.open-cluster-management.io/v1 kind: ConfigurationPolicy metadata: name: policy-mce-hcp-autoimport spec: remediationAction: enforce severity: low object-templates-raw: | {{- /* find the MultiClusterEngineHCP DiscoveredClusters */ -}} {{- range USDdc := (lookup \"discovery.open-cluster-management.io/v1\" \"DiscoveredCluster\" \"\" \"\").items }} {{- /* Check for the flag that indicates the import should be skipped */ -}} {{- USDskip := \"false\" -}} {{- range USDkey, USDvalue := USDdc.metadata.annotations }} {{- if and (eq USDkey \"discovery.open-cluster-management.io/previously-auto-imported\") (eq USDvalue \"true\") }} {{- USDskip = \"true\" }} {{- end }} {{- end }} {{- /* if the type is MultiClusterEngineHCP and the status is Active */ -}} {{- if and (eq USDdc.spec.status \"Active\") (contains (fromConfigMap \"open-cluster-management-global-set\" \"discovery-config\" \"mce-hcp-filter\") USDdc.spec.displayName) (eq USDdc.spec.type \"MultiClusterEngineHCP\") (eq USDskip \"false\") }} - complianceType: musthave objectDefinition: apiVersion: discovery.open-cluster-management.io/v1 kind: DiscoveredCluster metadata: name: {{ USDdc.metadata.name }} namespace: {{ USDdc.metadata.namespace }} spec: importAsManagedCluster: true 2 {{- end }} {{- end }}",
"apiVersion: cluster.open-cluster-management.io/v1beta1 kind: Placement metadata: name: policy-mce-hcp-autoimport-placement namespace: open-cluster-management-global-set spec: tolerations: - key: cluster.open-cluster-management.io/unreachable operator: Exists - key: cluster.open-cluster-management.io/unavailable operator: Exists clusterSets: - global predicates: - requiredClusterSelector: labelSelector: matchExpressions: - key: local-cluster operator: In values: - \"true\"",
"apiVersion: policy.open-cluster-management.io/v1 kind: PlacementBinding metadata: name: policy-mce-hcp-autoimport-placement-binding namespace: open-cluster-management-global-set placementRef: name: policy-mce-hcp-autoimport-placement apiGroup: cluster.open-cluster-management.io kind: Placement subjects: - name: policy-mce-hcp-autoimport apiGroup: policy.open-cluster-management.io kind: Policy",
"get policy policy-mce-hcp-autoimport -n <namespace>",
"annotations: discovery.open-cluster-management.io/previously-auto-imported: \"true\"",
"apiVersion: policy.open-cluster-management.io/v1 kind: Policy metadata: name: policy-rosa-autoimport annotations: policy.open-cluster-management.io/standards: NIST SP 800-53 policy.open-cluster-management.io/categories: CM Configuration Management policy.open-cluster-management.io/controls: CM-2 Baseline Configuration policy.open-cluster-management.io/description: OpenShift Service on AWS discovered clusters can be automatically imported into Red Hat Advanced Cluster Management as managed clusters with this policy. You can select and configure those managed clusters so you can import. Configure filters or add an annotation if you do not want all of your OpenShift Service on AWS clusters to be automatically imported. spec: remediationAction: inform 1 disabled: false policy-templates: - objectDefinition: apiVersion: policy.open-cluster-management.io/v1 kind: ConfigurationPolicy metadata: name: rosa-autoimport-config spec: object-templates: - complianceType: musthave objectDefinition: apiVersion: v1 kind: ConfigMap metadata: name: discovery-config namespace: open-cluster-management-global-set data: rosa-filter: \"\" 2 remediationAction: enforce severity: low - objectDefinition: apiVersion: policy.open-cluster-management.io/v1 kind: ConfigurationPolicy metadata: name: policy-rosa-autoimport spec: remediationAction: enforce severity: low object-templates-raw: | {{- /* find the ROSA DiscoveredClusters */ -}} {{- range USDdc := (lookup \"discovery.open-cluster-management.io/v1\" \"DiscoveredCluster\" \"\" \"\").items }} {{- /* Check for the flag that indicates the import should be skipped */ -}} {{- USDskip := \"false\" -}} {{- range USDkey, USDvalue := USDdc.metadata.annotations }} {{- if and (eq USDkey \"discovery.open-cluster-management.io/previously-auto-imported\") (eq USDvalue \"true\") }} {{- USDskip = \"true\" }} {{- end }} {{- end }} {{- /* if the type is ROSA and the status is Active */ -}} {{- if and (eq USDdc.spec.status \"Active\") (contains (fromConfigMap \"open-cluster-management-global-set\" \"discovery-config\" \"rosa-filter\") USDdc.spec.displayName) (eq USDdc.spec.type \"ROSA\") (eq USDskip \"false\") }} - complianceType: musthave objectDefinition: apiVersion: discovery.open-cluster-management.io/v1 kind: DiscoveredCluster metadata: name: {{ USDdc.metadata.name }} namespace: {{ USDdc.metadata.namespace }} spec: importAsManagedCluster: true {{- end }} {{- end }} - objectDefinition: apiVersion: policy.open-cluster-management.io/v1 kind: ConfigurationPolicy metadata: name: policy-rosa-managedcluster-status spec: remediationAction: enforce severity: low object-templates-raw: | {{- /* Use the same DiscoveredCluster list to check ManagedCluster status */ -}} {{- range USDdc := (lookup \"discovery.open-cluster-management.io/v1\" \"DiscoveredCluster\" \"\" \"\").items }} {{- /* Check for the flag that indicates the import should be skipped */ -}} {{- USDskip := \"false\" -}} {{- range USDkey, USDvalue := USDdc.metadata.annotations }} {{- if and (eq USDkey \"discovery.open-cluster-management.io/previously-auto-imported\") (eq USDvalue \"true\") }} {{- USDskip = \"true\" }} {{- end }} {{- end }} {{- /* if the type is ROSA and the status is Active */ -}} {{- if and (eq USDdc.spec.status \"Active\") (contains (fromConfigMap \"open-cluster-management-global-set\" \"discovery-config\" \"rosa-filter\") USDdc.spec.displayName) (eq USDdc.spec.type \"ROSA\") (eq USDskip \"false\") }} - complianceType: musthave objectDefinition: apiVersion: cluster.open-cluster-management.io/v1 kind: ManagedCluster metadata: name: {{ USDdc.spec.displayName }} namespace: {{ USDdc.spec.displayName }} status: conditions: - type: ManagedClusterConditionAvailable status: \"True\" {{- end }} {{- end }}",
"apiVersion: cluster.open-cluster-management.io/v1beta1 kind: Placement metadata: name: placement-openshift-plus-hub spec: predicates: - requiredClusterSelector: labelSelector: matchExpressions: - key: name operator: In values: - local-cluster",
"apiVersion: policy.open-cluster-management.io/v1 kind: PlacementBinding metadata: name: binding-policy-rosa-autoimport placementRef: apiGroup: cluster.open-cluster-management.io kind: Placement name: placement-policy-rosa-autoimport subjects: - apiGroup: policy.open-cluster-management.io kind: Policy name: policy-rosa-autoimport",
"get policy policy-rosa-autoimport -n <namespace>",
"kind: Secret metadata: name: <managed-cluster-name>-aws-creds namespace: <managed-cluster-namespace> type: Opaque data: aws_access_key_id: USD(echo -n \"USD{AWS_KEY}\" | base64 -w0) aws_secret_access_key: USD(echo -n \"USD{AWS_SECRET}\" | base64 -w0)",
"label secret hypershift-operator-oidc-provider-s3-credentials -n local-cluster \"cluster.open-cluster-management.io/type=awss3\" label secret hypershift-operator-oidc-provider-s3-credentials -n local-cluster \"cluster.open-cluster-management.io/credentials=credentials=\"",
"az ad sp create-for-rbac --role Contributor --name <service_principal> --scopes <subscription_path>",
"az ad sp create-for-rbac --role Contributor --name <service_principal> --scopes <subscription_path>",
"az account show",
"az account show",
"kind: Secret metadata: name: <managed-cluster-name>-azure-creds namespace: <managed-cluster-namespace> type: Opaque data: baseDomainResourceGroupName: USD(echo -n \"USD{azure_resource_group_name}\" | base64 -w0) osServicePrincipal.json: USD(base64 -w0 \"USD{AZURE_CRED_JSON}\")",
"kind: Secret metadata: name: <managed-cluster-name>-gcp-creds namespace: <managed-cluster-namespace> type: Opaque data: osServiceAccount.json: USD(base64 -w0 \"USD{GCP_CRED_JSON}\")",
"- mirrors: - registry.example.com:5000/ocp4 source: quay.io/openshift-release-dev/ocp-release-nightly - mirrors: - registry.example.com:5000/ocp4 source: quay.io/openshift-release-dev/ocp-release - mirrors: - registry.example.com:5000/ocp4 source: quay.io/openshift-release-dev/ocp-v4.0-art-dev",
"- mirrors: - registry.example.com:5000/rhacm2 source: registry.redhat.io/rhacm2",
"kind: Secret metadata: name: <managed-cluster-name>-vsphere-creds namespace: <managed-cluster-namespace> type: Opaque data: username: USD(echo -n \"USD{VMW_USERNAME}\" | base64 -w0) password.json: USD(base64 -w0 \"USD{VMW_PASSWORD}\")",
"- mirrors: - registry.example.com:5000/ocp4 source: quay.io/openshift-release-dev/ocp-release-nightly - mirrors: - registry.example.com:5000/ocp4 source: quay.io/openshift-release-dev/ocp-release - mirrors: - registry.example.com:5000/ocp4 source: quay.io/openshift-release-dev/ocp-v4.0-art-dev",
"- mirrors: - registry.example.com:5000/rhacm2 source: registry.redhat.io/rhacm2",
"kind: Secret metadata: name: <managed-cluster-name>-osp-creds namespace: <managed-cluster-namespace> type: Opaque data: clouds.yaml: USD(base64 -w0 \"USD{OSP_CRED_YAML}\") cloud: USD(echo -n \"openstack\" | base64 -w0)",
"apiVersion: hive.openshift.io/v1 kind: ClusterImageSet metadata: labels: channel: fast visible: 'true' name: img4.x.1-x86-64-appsub spec: releaseImage: quay.io/openshift-release-dev/ocp-release:4.x.1-x86_64",
"get clusterimageset",
"quay.io/openshift-release-dev/ocp-release:4.6.8-x86_64",
"apiVersion: hive.openshift.io/v1 kind: ClusterImageSet metadata: labels: channel: fast visible: 'true' name: img4.x.0-multi-appsub spec: releaseImage: quay.io/openshift-release-dev/ocp-release:4.x.0-multi",
"pull quay.io/openshift-release-dev/ocp-release:4.x.1-x86_64 pull quay.io/openshift-release-dev/ocp-release:4.x.1-ppc64le pull quay.io/openshift-release-dev/ocp-release:4.x.1-s390x pull quay.io/openshift-release-dev/ocp-release:4.x.1-aarch64",
"login <private-repo>",
"push quay.io/openshift-release-dev/ocp-release:4.x.1-x86_64 <private-repo>/ocp-release:4.x.1-x86_64 push quay.io/openshift-release-dev/ocp-release:4.x.1-ppc64le <private-repo>/ocp-release:4.x.1-ppc64le push quay.io/openshift-release-dev/ocp-release:4.x.1-s390x <private-repo>/ocp-release:4.x.1-s390x push quay.io/openshift-release-dev/ocp-release:4.x.1-aarch64 <private-repo>/ocp-release:4.x.1-aarch64",
"manifest create mymanifest",
"manifest add mymanifest <private-repo>/ocp-release:4.x.1-x86_64 manifest add mymanifest <private-repo>/ocp-release:4.x.1-ppc64le manifest add mymanifest <private-repo>/ocp-release:4.x.1-s390x manifest add mymanifest <private-repo>/ocp-release:4.x.1-aarch64",
"manifest push mymanifest docker://<private-repo>/ocp-release:4.x.1",
"apiVersion: hive.openshift.io/v1 kind: ClusterImageSet metadata: labels: channel: fast visible: \"true\" name: img4.x.1-appsub spec: releaseImage: <private-repo>/ocp-release:4.x.1",
"apply -f <file-name>.yaml",
"apiVersion: v1 kind: ConfigMap metadata: name: cluster-image-set-git-repo namespace: multicluster-engine data: gitRepoUrl: <forked acm-hive-openshift-releases repository URL> gitRepoBranch: backplane-<2.x> gitRepoPath: clusterImageSets channel: <fast or stable>",
"get clusterImageSets delete clusterImageSet <clusterImageSet_NAME>",
"apiVersion: hive.openshift.io/v1 kind: ClusterImageSet metadata: labels: channel: fast name: img<4.x.x>-x86-64-appsub spec: releaseImage: IMAGE_REGISTRY_IPADDRESS_or__DNSNAME/REPO_PATH/ocp-release@sha256:073a4e46289be25e2a05f5264c8f1d697410db66b960c9ceeddebd1c61e58717",
"adm release info <tagged_openshift_release_image> | grep \"Pull From\"",
"Pull From: quay.io/openshift-release-dev/ocp-release@sha256:69d1292f64a2b67227c5592c1a7d499c7d00376e498634ff8e1946bc9ccdddfe",
"create -f <clusterImageSet_FILE>",
"create -f img4.11.9-x86_64.yaml",
"delete -f subscribe/subscription-fast",
"make subscribe-candidate",
"make subscribe-fast",
"make subscribe-stable",
"delete -f subscribe/subscription-fast",
"get crd baremetalhosts.metal3.io",
"Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io \"baremetalhosts.metal3.io\" not found",
"apply -f",
"get provisioning",
"patch provisioning provisioning-configuration --type merge -p '{\"spec\":{\"watchAllNamespaces\": true }}'",
"apiVersion: metal3.io/v1alpha1 kind: Provisioning metadata: name: provisioning-configuration spec: provisioningNetwork: \"Disabled\" watchAllNamespaces: true",
"apply -f",
"apiVersion: v1 kind: ConfigMap metadata: name: <mirror-config> namespace: multicluster-engine labels: app: assisted-service data: ca-bundle.crt: | <certificate-content> registries.conf: | unqualified-search-registries = [\"registry.access.redhat.com\", \"docker.io\"] [[registry]] prefix = \"\" location = \"registry.redhat.io/multicluster-engine\" mirror-by-digest-only = true [[registry.mirror]] location = \"mirror.registry.com:5000/multicluster-engine\"",
"{ \"Authorization\": \"Basic xyz\" }",
"{ \"api_key\": \"myexampleapikey\", }",
"create secret generic -n multicluster-engine os-images-http-auth --from-file=./query_params --from-file=./headers",
"-n multicluster-engine create configmap image-service-additional-ca --from-file=tls.crt",
"apiVersion: agent-install.openshift.io/v1beta1 kind: AgentServiceConfig metadata: name: agent spec: databaseStorage: accessModes: - ReadWriteOnce resources: requests: storage: <db_volume_size> filesystemStorage: accessModes: - ReadWriteOnce resources: requests: storage: <fs_volume_size> mirrorRegistryRef: name: <mirror_config> 1 unauthenticatedRegistries: - <unauthenticated_registry> 2 imageStorage: accessModes: - ReadWriteOnce resources: requests: storage: <img_volume_size> 3 OSImageAdditionalParamsRef: name: os-images-http-auth OSImageCACertRef: name: image-service-additional-ca osImages: - openshiftVersion: \"<ocp_version>\" 4 version: \"<ocp_release_version>\" 5 url: \"<iso_url>\" 6 cpuArchitecture: \"x86_64\"",
"apiVersion: agent-install.openshift.io/v1beta1 kind: AgentServiceConfig metadata: name: agent spec: databaseStorage: accessModes: - ReadWriteOnce resources: requests: storage: <db_volume_size> 1 filesystemStorage: accessModes: - ReadWriteOnce resources: requests: storage: <fs_volume_size> 2 imageStorage: accessModes: - ReadWriteOnce resources: requests: storage: <img_volume_size> 3",
"login",
"apiVersion: agent-install.openshift.io/v1beta1 kind: AgentServiceConfig metadata: annotations: agent-install.openshift.io/service-image-base: el8",
"get routes --all-namespaces | grep assisted-image-service",
"apiVersion: operator.openshift.io/v1 kind: IngressController metadata: name: ingress-controller-with-nlb namespace: openshift-ingress-operator spec: domain: nlb-apps.<domain>.com routeSelector: matchLabels: router-type: nlb endpointPublishingStrategy: type: LoadBalancerService loadBalancer: scope: External providerParameters: type: AWS aws: type: NLB",
"apply -f ingresscontroller.yaml",
"get ingresscontroller -n openshift-ingress-operator",
"edit ingresscontroller <name> -n openshift-ingress-operator",
"edit route assisted-image-service -n <namespace>",
"metadata: labels: router-type: nlb name: assisted-image-service",
"assisted-image-service-multicluster-engine.apps.<yourdomain>.com",
"get pods -n multicluster-engine | grep assist",
"login",
"apiVersion: v1 kind: Namespace metadata: name: <your_namespace> 1",
"apply -f namespace.yaml",
"apiVersion: v1 kind: Secret type: kubernetes.io/dockerconfigjson metadata: name: pull-secret 1 namespace: <your_namespace> stringData: .dockerconfigjson: <your_pull_secret> 2",
"apply -f pull-secret.yaml",
"apiVersion: agent-install.openshift.io/v1beta1 kind: InfraEnv metadata: name: myinfraenv namespace: <your_namespace> spec: proxy: httpProxy: <http://user:password@ipaddr:port> httpsProxy: <http://user:password@ipaddr:port> noProxy: additionalNTPSources: sshAuthorizedKey: pullSecretRef: name: <name> agentLabels: <key>: <value> nmStateConfigLabelSelector: matchLabels: <key>: <value> clusterRef: name: <cluster_name> namespace: <project_name> ignitionConfigOverride: '{\"ignition\": {\"version\": \"3.1.0\"}, ...}' cpuArchitecture: x86_64 ipxeScriptType: DiscoveryImageAlways kernelArguments: - operation: append value: audit=0 additionalTrustBundle: <bundle> osImageVersion: <version>",
"apply -f infra-env.yaml",
"describe infraenv myinfraenv -n <your_namespace>",
"apiVersion: agent-install.openshift.io/v1beta1 kind: NMStateConfig metadata: name: mynmstateconfig namespace: <your-infraenv-namespace> labels: some-key: <some-value> spec: config: interfaces: - name: eth0 type: ethernet state: up mac-address: 02:00:00:80:12:14 ipv4: enabled: true address: - ip: 192.168.111.30 prefix-length: 24 dhcp: false - name: eth1 type: ethernet state: up mac-address: 02:00:00:80:12:15 ipv4: enabled: true address: - ip: 192.168.140.30 prefix-length: 24 dhcp: false dns-resolver: config: server: - 192.168.126.1 routes: config: - destination: 0.0.0.0/0 next-hop-address: 192.168.111.1 next-hop-interface: eth1 table-id: 254 - destination: 0.0.0.0/0 next-hop-address: 192.168.140.1 next-hop-interface: eth1 table-id: 254 interfaces: - name: \"eth0\" macAddress: \"02:00:00:80:12:14\" - name: \"eth1\" macAddress: \"02:00:00:80:12:15\"",
"apply -f nmstateconfig.yaml",
"You now see a URL to download the ISO file. Booted hosts appear in the host inventory table. Hosts might take a few minutes to appear.",
"*Note:* By default, the ISO that is provided is a _minimal_ ISO. The minimal ISO does not contain the root file system, `RootFS`. The `RootFS` is downloaded later. To display full ISO, replace `minimal.iso` in the URL with `full.iso`.",
"get infraenv -n <infra env namespace> <infra env name> -o jsonpath='{.status.isoDownloadURL}'",
"https://assisted-image-service-assisted-installer.apps.example-acm-hub.com/byapikey/eyJhbGciOiJFUzI1NiIsInC93XVCJ9.eyJpbmZyYV9lbnZfaWQcTA0Y38sWVjYi02MTA0LTQ4NDMtODasdkOGIxYTZkZGM5ZTUifQ.3ydTpHaXJmTasd7uDp2NvGUFRKin3Z9Qct3lvDky1N-5zj3KsRePhAM48aUccBqmucGt3g/4.16/x86_64/minimal.iso",
"get agent -n <infra env namespace>",
"NAME CLUSTER APPROVED ROLE STAGE 24a92a6f-ea35-4d6f-9579-8f04c0d3591e false auto-assign",
"patch agent -n <infra env namespace> <agent name> -p '{\"spec\":{\"approved\":true}}' --type merge",
"get agent -n <infra env namespace>",
"NAME CLUSTER APPROVED ROLE STAGE 173e3a84-88e2-4fe1-967f-1a9242503bec true auto-assign",
"apiVersion: v1 kind: Secret metadata: name: <bmc-secret-name> namespace: <your_infraenv_namespace> 1 type: Opaque data: username: <username> password: <password>",
"apiVersion: metal3.io/v1alpha1 kind: BareMetalHost metadata: name: <bmh-name> namespace: <your-infraenv-namespace> 1 annotations: inspect.metal3.io: disabled bmac.agent-install.openshift.io/hostname: <hostname> 2 bmac.agent-install.openshift.io/role: <role> 3 labels: infraenvs.agent-install.openshift.io: <your-infraenv> 4 spec: online: true automatedCleaningMode: disabled 5 bootMACAddress: <your-mac-address> 6 bmc: address: <machine-address> 7 credentialsName: <bmc-secret-name> 8 rootDeviceHints: deviceName: /dev/sda 9",
"apiVersion: v1 kind: ConfigMap metadata: name: my-assisted-service-config namespace: multicluster-engine data: ALLOW_CONVERGED_FLOW: \"false\"",
"annotate --overwrite AgentServiceConfig agent unsupported.agent-install.openshift.io/assisted-service-configmap=my-assisted-service-config",
"bmac.agent-install.openshift.io/remove-agent-and-node-on-delete: true",
"delete bmh <bmh-name>",
"[\"--append-karg\", \"ip=192.0.2.2::192.0.2.254:255.255.255.0:core0.example.com:enp1s0:none\", \"--save-partindex\", \"4\"]",
"{\"ignition\": \"version\": \"3.1.0\"}, \"storage\": {\"files\": [{\"path\": \"/tmp/example\", \"contents\": {\"source\": \"data:text/plain;base64,aGVscGltdHJhcHBlZGluYXN3YWdnZXJzcGVj\"}}]}}",
"git clone https://github.com/stolostron/acm-hive-openshift-releases.git cd acm-hive-openshift-releases git checkout origin/backplane-<2.x>",
"find clusterImageSets/fast -type d -exec oc apply -f {} \\; 2> /dev/null",
"apiVersion: hive.openshift.io/v1 kind: ClusterImageSet metadata: labels: channel: stable visible: 'true' name: img4.x.47-x86-64-appsub spec: releaseImage: quay.io/openshift-release-dev/ocp-release:4.x.47-x86_64",
"kind: ConfigMap apiVersion: v1 metadata: name: <my-baremetal-cluster-install-manifests> namespace: <mynamespace> data: 99_metal3-config.yaml: | kind: ConfigMap apiVersion: v1 metadata: name: metal3-config namespace: openshift-machine-api data: http_port: \"6180\" provisioning_interface: \"enp1s0\" provisioning_ip: \"172.00.0.3/24\" dhcp_range: \"172.00.0.10,172.00.0.100\" deploy_kernel_url: \"http://172.00.0.3:6180/images/ironic-python-agent.kernel\" deploy_ramdisk_url: \"http://172.00.0.3:6180/images/ironic-python-agent.initramfs\" ironic_endpoint: \"http://172.00.0.3:6385/v1/\" ironic_inspector_endpoint: \"http://172.00.0.3:5150/v1/\" cache_url: \"http://192.168.111.1/images\" rhcos_image_url: \"https://releases-art-rhcos.svc.ci.openshift.org/art/storage/releases/rhcos-4.3/43.81.201911192044.0/x86_64/rhcos-43.81.201911192044.0-openstack.x86_64.qcow2.gz\"",
"apply -f <filename>.yaml",
"apiVersion: hive.openshift.io/v1 kind: ClusterDeployment metadata: name: <my-baremetal-cluster> namespace: <mynamespace> annotations: hive.openshift.io/try-install-once: \"true\" spec: baseDomain: test.example.com clusterName: <my-baremetal-cluster> controlPlaneConfig: servingCertificates: {} platform: baremetal: libvirtSSHPrivateKeySecretRef: name: provisioning-host-ssh-private-key provisioning: installConfigSecretRef: name: <my-baremetal-cluster-install-config> sshPrivateKeySecretRef: name: <my-baremetal-hosts-ssh-private-key> manifestsConfigMapRef: name: <my-baremetal-cluster-install-manifests> imageSetRef: name: <my-clusterimageset> sshKnownHosts: - \"10.1.8.90 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXvVVVKUYVkuyvkuygkuyTCYTytfkufTYAAAAIbmlzdHAyNTYAAABBBKWjJRzeUVuZs4yxSy4eu45xiANFIIbwE3e1aPzGD58x/NX7Yf+S8eFKq4RrsfSaK2hVJyJjvVIhUsU9z2sBJP8=\" pullSecretRef: name: <my-baremetal-cluster-pull-secret>",
"apply -f <filename>.yaml",
"vpc-1 (us-gov-east-1) : 10.0.0.0/20 subnet-11 (us-gov-east-1a): 10.0.0.0/23 subnet-12 (us-gov-east-1b): 10.0.2.0/23 subnet-13 (us-gov-east-1c): 10.0.4.0/23 subnet-12 (us-gov-east-1d): 10.0.8.0/23 subnet-12 (us-gov-east-1e): 10.0.10.0/23 subnet-12 (us-gov-east-1f): 10.0.12.0/2",
"vpc-2 (us-gov-east-1) : 10.0.16.0/20 subnet-21 (us-gov-east-1a): 10.0.16.0/23 subnet-22 (us-gov-east-1b): 10.0.18.0/23 subnet-23 (us-gov-east-1c): 10.0.20.0/23 subnet-24 (us-gov-east-1d): 10.0.22.0/23 subnet-25 (us-gov-east-1e): 10.0.24.0/23 subnet-26 (us-gov-east-1f): 10.0.28.0/23",
"ec2:CreateVpcEndpointServiceConfiguration ec2:DescribeVpcEndpointServiceConfigurations ec2:ModifyVpcEndpointServiceConfiguration ec2:DescribeVpcEndpointServicePermissions ec2:ModifyVpcEndpointServicePermissions ec2:DeleteVpcEndpointServiceConfigurations",
"ec2:DescribeVpcEndpointServices ec2:DescribeVpcEndpoints ec2:CreateVpcEndpoint ec2:CreateTags ec2:DescribeNetworkInterfaces ec2:DescribeVPCs ec2:DeleteVpcEndpoints route53:CreateHostedZone route53:GetHostedZone route53:ListHostedZonesByVPC route53:AssociateVPCWithHostedZone route53:DisassociateVPCFromHostedZone route53:CreateVPCAssociationAuthorization route53:DeleteVPCAssociationAuthorization route53:ListResourceRecordSets route53:ChangeResourceRecordSets route53:DeleteHostedZone",
"route53:AssociateVPCWithHostedZone route53:DisassociateVPCFromHostedZone ec2:DescribeVPCs",
"spec: awsPrivateLink: ## The list of inventory of VPCs that can be used to create VPC ## endpoints by the controller. endpointVPCInventory: - region: us-east-1 vpcID: vpc-1 subnets: - availabilityZone: us-east-1a subnetID: subnet-11 - availabilityZone: us-east-1b subnetID: subnet-12 - availabilityZone: us-east-1c subnetID: subnet-13 - availabilityZone: us-east-1d subnetID: subnet-14 - availabilityZone: us-east-1e subnetID: subnet-15 - availabilityZone: us-east-1f subnetID: subnet-16 - region: us-east-1 vpcID: vpc-2 subnets: - availabilityZone: us-east-1a subnetID: subnet-21 - availabilityZone: us-east-1b subnetID: subnet-22 - availabilityZone: us-east-1c subnetID: subnet-23 - availabilityZone: us-east-1d subnetID: subnet-24 - availabilityZone: us-east-1e subnetID: subnet-25 - availabilityZone: us-east-1f subnetID: subnet-26 ## The credentialsSecretRef references a secret with permissions to create. ## The resources in the account where the inventory of VPCs exist. credentialsSecretRef: name: <hub-account-credentials-secret-name> ## A list of VPC where various mce clusters exists. associatedVPCs: - region: region-mce1 vpcID: vpc-mce1 credentialsSecretRef: name: <credentials-that-have-access-to-account-where-MCE1-VPC-exists> - region: region-mce2 vpcID: vpc-mce2 credentialsSecretRef: name: <credentials-that-have-access-to-account-where-MCE2-VPC-exists>",
"api.<cluster_name>.<base_domain>",
"*.apps.<cluster_name>.<base_domain>",
"api.<cluster_name>.<base_domain>",
"*.apps.<cluster_name>.<base_domain>",
"apiVersion: v1 kind: Secret type: Opaque metadata: name: ocp3-openstack-trust namespace: ocp3 stringData: ca.crt: | -----BEGIN CERTIFICATE----- <Base64 certificate contents here> -----END CERTIFICATE----- -----BEGIN CERTIFICATE----- <Base64 certificate contents here> -----END CERTIFICATE----",
"platform: openstack: certificatesSecretRef: name: ocp3-openstack-trust credentialsSecretRef: name: ocp3-openstack-creds cloud: openstack",
"api.<cluster_name>.<base_domain>",
"*.apps.<cluster_name>.<base_domain>",
"apiVersion: v1 kind: Namespace metadata: name: sample-namespace",
"apiVersion: v1 kind: Secret type: kubernetes.io/dockerconfigjson metadata: name: <pull-secret> namespace: sample-namespace stringData: .dockerconfigjson: 'your-pull-secret-json' 1",
"apiVersion: hive.openshift.io/v1 kind: ClusterImageSet metadata: name: openshift-v4.14.0 spec: releaseImage: quay.io/openshift-release-dev/ocp-release:4.14.0-rc.0-x86_64",
"apiVersion: hive.openshift.io/v1 kind: ClusterDeployment metadata: name: single-node namespace: demo-worker4 spec: baseDomain: hive.example.com clusterInstallRef: group: extensions.hive.openshift.io kind: AgentClusterInstall name: test-agent-cluster-install 1 version: v1beta1 clusterName: test-cluster controlPlaneConfig: servingCertificates: {} platform: agentBareMetal: agentSelector: matchLabels: location: internal pullSecretRef: name: <pull-secret> 2",
"apiVersion: extensions.hive.openshift.io/v1beta1 kind: AgentClusterInstall metadata: name: test-agent-cluster-install namespace: demo-worker4 spec: platformType: BareMetal 1 clusterDeploymentRef: name: single-node 2 imageSetRef: name: openshift-v4.14.0 3 networking: clusterNetwork: - cidr: 10.128.0.0/14 hostPrefix: 23 machineNetwork: - cidr: 192.168.111.0/24 serviceNetwork: - 172.30.0.0/16 provisionRequirements: controlPlaneAgents: 1 sshPublicKey: ssh-rsa <your-public-key-here> 4",
"apiVersion: agent-install.openshift.io/v1beta1 kind: NMStateConfig metadata: name: <mynmstateconfig> namespace: <demo-worker4> labels: demo-nmstate-label: <value> spec: config: interfaces: - name: eth0 type: ethernet state: up mac-address: 02:00:00:80:12:14 ipv4: enabled: true address: - ip: 192.168.111.30 prefix-length: 24 dhcp: false - name: eth1 type: ethernet state: up mac-address: 02:00:00:80:12:15 ipv4: enabled: true address: - ip: 192.168.140.30 prefix-length: 24 dhcp: false dns-resolver: config: server: - 192.168.126.1 routes: config: - destination: 0.0.0.0/0 next-hop-address: 192.168.111.1 next-hop-interface: eth1 table-id: 254 - destination: 0.0.0.0/0 next-hop-address: 192.168.140.1 next-hop-interface: eth1 table-id: 254 interfaces: - name: \"eth0\" macAddress: \"02:00:00:80:12:14\" - name: \"eth1\" macAddress: \"02:00:00:80:12:15\"",
"apiVersion: agent-install.openshift.io/v1beta1 kind: InfraEnv metadata: name: myinfraenv namespace: demo-worker4 spec: clusterRef: name: single-node 1 namespace: demo-worker4 2 pullSecretRef: name: pull-secret sshAuthorizedKey: <your_public_key_here> nmStateConfigLabelSelector: matchLabels: demo-nmstate-label: value proxy: httpProxy: http://USERNAME:[email protected]:PORT httpsProxy: https://USERNAME:[email protected]:PORT noProxy: .example.com,172.22.0.0/24,10.10.0.0/24",
"curl --insecure -o image.iso USD(kubectl -n sample-namespace get infraenvs.agent-install.openshift.io myinfraenv -o=jsonpath=\"{.status.isoDownloadURL}\")",
"-n sample-namespace patch agents.agent-install.openshift.io 07e80ea9-200c-4f82-aff4-4932acb773d4 -p '{\"spec\":{\"approved\":true}}' --type merge",
"apiVersion: v1 kind: Proxy baseDomain: <domain> proxy: httpProxy: http://<username>:<password>@<proxy.example.com>:<port> httpsProxy: https://<username>:<password>@<proxy.example.com>:<port> noProxy: <wildcard-of-domain>,<provisioning-network/CIDR>,<BMC-address-range/CIDR>",
"apiVersion: extensions.hive.openshift.io/v1beta1 kind: AgentClusterInstall spec: proxy: httpProxy: http://<username>:<password>@<proxy.example.com>:<port> 1 httpsProxy: https://<username>:<password>@<proxy.example.com>:<port> 2 noProxy: <wildcard-of-domain>,<provisioning-network/CIDR>,<BMC-address-range/CIDR> 3",
"create secret generic pull-secret -n <open-cluster-management> --from-file=.dockerconfigjson=<path-to-pull-secret> --type=kubernetes.io/dockerconfigjson",
"edit managedcluster <cluster-name>",
"spec: hubAcceptsClient: true managedClusterClientConfigs: - url: <https://api.new-managed.dev.redhat.com> 1",
"apiVersion: cluster.open-cluster-management.io/v1 kind: ManagedCluster metadata: annotations: open-cluster-management/nodeSelector: '{\"dedicated\":\"acm\"}' open-cluster-management/tolerations: '[{\"key\":\"dedicated\",\"operator\":\"Equal\",\"value\":\"acm\",\"effect\":\"NoSchedule\"}]'",
"apiVersion: config.open-cluster-management.io/v1alpha1 kind: KlusterletConfig metadata: name: <klusterletconfigName> spec: nodePlacement: nodeSelector: dedicated: acm tolerations: - key: dedicated operator: Equal value: acm effect: NoSchedule",
"delete po -n open-cluster-management `oc get pod -n open-cluster-management | grep multiclusterhub-operator| cut -d' ' -f1`",
"login",
"new-project <cluster_name>",
"apiVersion: cluster.open-cluster-management.io/v1 kind: ManagedCluster metadata: name: <cluster_name> labels: cloud: auto-detect vendor: auto-detect spec: hubAcceptsClient: true",
"cloud: Amazon vendor: OpenShift",
"apply -f managed-cluster.yaml",
"apiVersion: v1 kind: Secret metadata: name: auto-import-secret namespace: <cluster_name> stringData: autoImportRetry: \"5\" # If you are using the kubeconfig file, add the following value for the kubeconfig file # that has the current context set to the cluster to import: kubeconfig: |- <kubeconfig_file> # If you are using the token/server pair, add the following two values instead of # the kubeconfig file: token: <Token to access the cluster> server: <cluster_api_url> type: Opaque",
"apply -f auto-import-secret.yaml",
"-n <cluster_name> annotate secrets auto-import-secret managedcluster-import-controller.open-cluster-management.io/keeping-auto-import-secret=\"\"",
"get managedcluster <cluster_name>",
"login",
"get pod -n open-cluster-management-agent",
"get secret <cluster_name>-import -n <cluster_name> -o jsonpath={.data.crds\\\\.yaml} | base64 --decode > klusterlet-crd.yaml",
"get secret <cluster_name>-import -n <cluster_name> -o jsonpath={.data.import\\\\.yaml} | base64 --decode > import.yaml",
"login",
"apply -f klusterlet-crd.yaml",
"apply -f import.yaml",
"get managedcluster <cluster_name>",
"apiVersion: agent.open-cluster-management.io/v1 kind: KlusterletAddonConfig metadata: name: <cluster_name> namespace: <cluster_name> spec: applicationManager: enabled: true certPolicyController: enabled: true policyController: enabled: true searchCollector: enabled: true",
"apply -f klusterlet-addon-config.yaml",
"get pod -n open-cluster-management-agent-addon",
"delete managedcluster <cluster_name>",
"export agent_registration_host=USD(oc get route -n multicluster-engine agent-registration -o=jsonpath=\"{.spec.host}\")",
"get configmap -n kube-system kube-root-ca.crt -o=jsonpath=\"{.data['ca\\.crt']}\" > ca.crt_",
"apiVersion: v1 kind: ServiceAccount metadata: name: managed-cluster-import-agent-registration-sa namespace: multicluster-engine --- apiVersion: v1 kind: Secret type: kubernetes.io/service-account-token metadata: name: managed-cluster-import-agent-registration-sa-token namespace: multicluster-engine annotations: kubernetes.io/service-account.name: \"managed-cluster-import-agent-registration-sa\" --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: managedcluster-import-controller-agent-registration-client rules: - nonResourceURLs: [\"/agent-registration/*\"] verbs: [\"get\"] --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: name: managed-cluster-import-agent-registration roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: managedcluster-import-controller-agent-registration-client subjects: - kind: ServiceAccount name: managed-cluster-import-agent-registration-sa namespace: multicluster-engine",
"export token=USD(oc get secret -n multicluster-engine managed-cluster-import-agent-registration-sa-token -o=jsonpath='{.data.token}' | base64 -d)",
"patch clustermanager cluster-manager --type=merge -p '{\"spec\":{\"registrationConfiguration\":{\"featureGates\":[ {\"feature\": \"ManagedClusterAutoApproval\", \"mode\": \"Enable\"}], \"autoApproveUsers\":[\"system:serviceaccount:multicluster-engine:agent-registration-bootstrap\"]}}}'",
"curl --cacert ca.crt -H \"Authorization: Bearer USDtoken\" https://USDagent_registration_host/agent-registration/crds/v1 | oc apply -f -",
"curl --cacert ca.crt -H \"Authorization: Bearer USDtoken\" https://USDagent_registration_host/agent-registration/manifests/<clusterName>?klusterletconfig=<klusterletconfigName> | oc apply -f -",
"apiVersion: v1 kind: Namespace metadata: name: managed-cluster",
"apiVersion: hive.openshift.io/v1 kind: ClusterImageSet metadata: name: openshift-v4.11.18 spec: releaseImage: quay.io/openshift-release-dev/ocp-release@sha256:22e149142517dfccb47be828f012659b1ccf71d26620e6f62468c264a7ce7863",
"apiVersion: v1 kind: Secret type: kubernetes.io/dockerconfigjson metadata: name: pull-secret namespace: managed-cluster stringData: .dockerconfigjson: <pull-secret-json> 1",
"get secret -n openshift-kube-apiserver node-kubeconfigs -ojson | jq '.data[\"lb-ext.kubeconfig\"]' --raw-output | base64 -d > /tmp/kubeconfig.some-other-cluster",
"-n managed-cluster create secret generic some-other-cluster-admin-kubeconfig --from-file=kubeconfig=/tmp/kubeconfig.some-other-cluster",
"apiVersion: extensions.hive.openshift.io/v1beta1 kind: AgentClusterInstall metadata: name: <your-cluster-name> 1 namespace: <managed-cluster> spec: networking: userManagedNetworking: true clusterDeploymentRef: name: <your-cluster> imageSetRef: name: openshift-v4.11.18 provisionRequirements: controlPlaneAgents: 2 sshPublicKey: <\"\"> 3",
"apiVersion: hive.openshift.io/v1 kind: ClusterDeployment metadata: name: <your-cluster-name> 1 namespace: managed-cluster spec: baseDomain: <redhat.com> 2 installed: <true> 3 clusterMetadata: adminKubeconfigSecretRef: name: <your-cluster-name-admin-kubeconfig> 4 clusterID: <\"\"> 5 infraID: <\"\"> 6 clusterInstallRef: group: extensions.hive.openshift.io kind: AgentClusterInstall name: your-cluster-name-install version: v1beta1 clusterName: your-cluster-name platform: agentBareMetal: pullSecretRef: name: pull-secret",
"apiVersion: agent-install.openshift.io/v1beta1 kind: InfraEnv metadata: name: your-infraenv namespace: managed-cluster spec: clusterRef: name: your-cluster-name namespace: managed-cluster pullSecretRef: name: pull-secret sshAuthorizedKey: \"\"",
"get infraenv -n managed-cluster some-other-infraenv -ojson | jq \".status.<url>\" --raw-output | xargs curl -k -o /storage0/isos/some-other.iso",
"apiVersion: imageregistry.open-cluster-management.io/v1alpha1 kind: ManagedClusterImageRegistry metadata: name: <imageRegistryName> namespace: <namespace> spec: placementRef: group: cluster.open-cluster-management.io resource: placements name: <placementName> 1 pullSecret: name: <pullSecretName> 2 registries: 3 - mirror: <mirrored-image-registry-address> source: <image-registry-address> - mirror: <mirrored-image-registry-address> source: <image-registry-address>",
"registries: - mirror: localhost:5000/rhacm2/ source: registry.redhat.io/rhacm2 - mirror: localhost:5000/multicluster-engine source: registry.redhat.io/multicluster-engine",
"registries: - mirror: localhost:5000/rhacm2-registration-rhel8-operator source: registry.redhat.io/rhacm2/registration-rhel8-operator",
"apiVersion: config.open-cluster-management.io/v1alpha1 kind: KlusterletConfig metadata: name: <klusterletconfigName> spec: pullSecret: namespace: <pullSecretNamespace> name: <pullSecretName> registries: - mirror: <mirrored-image-registry-address> source: <image-registry-address> - mirror: <mirrored-image-registry-address> source: <image-registry-address>",
"kubectl create secret docker-registry myPullSecret --docker-server=<your-registry-server> --docker-username=<my-name> --docker-password=<my-password>",
"apiVersion: cluster.open-cluster-management.io/v1beta1 kind: Placement metadata: name: myPlacement namespace: myNamespace spec: clusterSets: - myClusterSet tolerations: - key: \"cluster.open-cluster-management.io/unreachable\" operator: Exists",
"apiVersion: cluster.open-cluster-management.io/v1beta2 kind: ManagedClusterSet metadata: name: myClusterSet --- apiVersion: cluster.open-cluster-management.io/v1beta2 kind: ManagedClusterSetBinding metadata: name: myClusterSet namespace: myNamespace spec: clusterSet: myClusterSet",
"apiVersion: imageregistry.open-cluster-management.io/v1alpha1 kind: ManagedClusterImageRegistry metadata: name: myImageRegistry namespace: myNamespace spec: placementRef: group: cluster.open-cluster-management.io resource: placements name: myPlacement pullSecret: name: myPullSecret registry: myRegistryAddress",
"get machinepools -n <managed-cluster-namespace>",
"edit machinepool <MachinePool-resource-name> -n <managed-cluster-namespace>",
"get machinepools -n <managed-cluster-namespace>",
"edit machinepool <name-of-MachinePool-resource> -n <namespace-of-managed-cluster>",
"get machinepools -n <managed-cluster-namespace>",
"edit machinepool <MachinePool-resource-name> -n <managed-cluster-namespace>",
"export <kubeconfig_name>=USD(oc get cd USD<cluster_name> -o \"jsonpath={.spec.clusterMetadata.adminKubeconfigSecretRef.name}\") extract secret/USD<kubeconfig_name> --keys=kubeconfig --to=- > original-kubeconfig --kubeconfig=original-kubeconfig get node",
"Unable to connect to the server: tls: failed to verify certificate: x509: certificate signed by unknown authority",
"echo <base64 encoded blob> | base64 --decode > decoded-existing-certs.pem",
"cp original-kubeconfig <new_kubeconfig_name>",
"cat decoded-existing-certs.pem new-ca-certificate.pem | openssl base64 -A",
"KUBECONFIG=<new_kubeconfig_name> oc get nodes",
"patch secret USDoriginal-kubeconfig --type='json' -p=\"[{'op': 'replace', 'path': '/data/kubeconfig', 'value': 'USD(openssl base64 -A -in <new_kubeconfig_name>)'},{'op': 'replace', 'path': '/data/raw-kubeconfig', 'value': 'USD(openssl base64 -A -in <new_kubeconfig_name>)'}]\"",
"watch -n 5 \"oc get agent -n managed-cluster\"",
"get agent -n managed-cluster -ojson | jq -r '.items[] | select(.spec.approved==false) |select(.spec.clusterDeploymentName==null) | .metadata.name'| xargs oc -n managed-cluster patch -p '{\"spec\":{\"clusterDeploymentName\":{\"name\":\"some-other-cluster\",\"namespace\":\"managed-cluster\"}}}' --type merge agent",
"get agent -n managed-cluster -ojson | jq -r '.items[] | select(.spec.approved==false) | .metadata.name'| xargs oc -n managed-cluster patch -p '{\"spec\":{\"approved\":true}}' --type merge agent",
"patch agent <AGENT-NAME> -p '{\"spec\":{\"role\": \"master\"}}' --type=merge",
"bmac.agent-install.openshift.io/role: master",
"patch agent <AGENT-NAME> -p '{\"spec\":{\"role\": \"master\"}}' --type=merge",
"edit clusterdeployment <name-of-cluster> -n <namespace-of-cluster>",
"get clusterdeployment <name-of-cluster> -n <namespace-of-cluster> -o yaml",
"edit clusterdeployment <name-of-cluster> -n <namespace-of-cluster>",
"get clusterdeployment <name-of-cluster> -n <namespace-of-cluster> -o yaml",
"UPSTREAM_REGISTRY=quay.io PRODUCT_REPO=openshift-release-dev RELEASE_NAME=ocp-release OCP_RELEASE=4.12.2-x86_64 LOCAL_REGISTRY=USD(hostname):5000 LOCAL_SECRET_JSON=/path/to/pull/secret 1 adm -a USD{LOCAL_SECRET_JSON} release mirror --from=USD{UPSTREAM_REGISTRY}/USD{PRODUCT_REPO}/USD{RELEASE_NAME}:USD{OCP_RELEASE} --to=USD{LOCAL_REGISTRY}/ocp4 --to-release-image=USD{LOCAL_REGISTRY}/ocp4/release:USD{OCP_RELEASE}",
"git clone https://github.com/openshift/cincinnati-graph-data",
"FROM registry.access.redhat.com/ubi8/ubi:8.1 1 RUN curl -L -o cincinnati-graph-data.tar.gz https://github.com/openshift/cincinnati-graph-data/archive/master.tar.gz 2 RUN mkdir -p /var/lib/cincinnati/graph-data/ 3 CMD exec /bin/bash -c \"tar xvzf cincinnati-graph-data.tar.gz -C /var/lib/ cincinnati/graph-data/ --strip-components=1\" 4",
"build -f <path_to_Dockerfile> -t <USD{DISCONNECTED_REGISTRY}/cincinnati/cincinnati-graph-data-container>:latest 1 2 push <USD{DISCONNECTED_REGISTRY}/cincinnati/cincinnati-graph-data-container><2>:latest --authfile=</path/to/pull_secret>.json 3",
"apiVersion: v1 kind: ConfigMap metadata: name: trusted-ca data: updateservice-registry: | -----BEGIN CERTIFICATE----- -----END CERTIFICATE-----",
"patch image.config.openshift.io cluster -p '{\"spec\":{\"additionalTrustedCA\":{\"name\":\"trusted-ca\"}}}' --type merge",
"apiVersion: cincinnati.openshift.io/v1beta2 kind: Cincinnati metadata: name: openshift-update-service-instance namespace: openshift-cincinnati spec: registry: <registry_host_name>:<port> 1 replicas: 1 repository: USD{LOCAL_REGISTRY}/ocp4/release graphDataImage: '<host_name>:<port>/cincinnati-graph-data-container' 2",
"apiVersion: operator.openshift.io/v1alpha1 kind: ImageContentSourcePolicy metadata: name: <your-local-mirror-name> 1 spec: repositoryDigestMirrors: - mirrors: - <your-registry> 2 source: registry.redhat.io",
"apply -f mirror.yaml",
"apiVersion: config.openshift.io/v1 kind: OperatorHub metadata: name: cluster spec: disableAllDefaultSources: true --- apiVersion: operators.coreos.com/v1alpha1 kind: CatalogSource metadata: name: my-operator-catalog namespace: openshift-marketplace spec: sourceType: grpc image: '<registry_host_name>:<port>/olm/redhat-operators:v1' 1 displayName: My Operator Catalog publisher: grpc",
"apply -f source.yaml",
"get clusterversion -o yaml",
"apiVersion: v1 items: - apiVersion: config.openshift.io/v1 kind: ClusterVersion [..] spec: channel: stable-4.x upstream: https://api.openshift.com/api/upgrades_info/v1/graph",
"get routes",
"edit clusterversion version",
"get routes -A",
"get clusterversion -o yaml",
"apiVersion: v1 items: - apiVersion: config.openshift.io/v1 kind: ClusterVersion [..] spec: channel: stable-4.x upstream: https://<hub-cincinnati-uri>/api/upgrades_info/v1/graph",
"export KUBECONFIG=<managed-cluster-kubeconfig>",
"create role -n default test-role --verb=list,get --resource=pods create rolebinding -n default test-rolebinding --serviceaccount=default:default --role=test-role",
"get secret -n default | grep <default-token>",
"export MANAGED_CLUSTER_TOKEN=USD(kubectl -n default get secret <default-token> -o jsonpath={.data.token} | base64 -d)",
"config view --minify --raw=true > cluster-proxy.kubeconfig",
"export TARGET_MANAGED_CLUSTER=<managed-cluster-name> export NEW_SERVER=https://USD(oc get route -n multicluster-engine cluster-proxy-addon-user -o=jsonpath='{.spec.host}')/USDTARGET_MANAGED_CLUSTER sed -i'' -e '/server:/c\\ server: '\"USDNEW_SERVER\"'' cluster-proxy.kubeconfig export CADATA=USD(oc get configmap -n openshift-service-ca kube-root-ca.crt -o=go-template='{{index .data \"ca.crt\"}}' | base64) sed -i'' -e '/certificate-authority-data:/c\\ certificate-authority-data: '\"USDCADATA\"'' cluster-proxy.kubeconfig",
"sed -i'' -e '/client-certificate-data/d' cluster-proxy.kubeconfig sed -i'' -e '/client-key-data/d' cluster-proxy.kubeconfig sed -i'' -e '/token/d' cluster-proxy.kubeconfig",
"sed -i'' -e 'USDa\\ token: '\"USDMANAGED_CLUSTER_TOKEN\"'' cluster-proxy.kubeconfig",
"get pods --kubeconfig=cluster-proxy.kubeconfig -n <default>",
"export PROMETHEUS_TOKEN=USD(kubectl get secret -n openshift-monitoring USD(kubectl get serviceaccount -n openshift-monitoring prometheus-k8s -o=jsonpath='{.secrets[0].name}') -o=jsonpath='{.data.token}' | base64 -d)",
"get configmap kube-root-ca.crt -o=jsonpath='{.data.ca\\.crt}' > hub-ca.crt",
"export SERVICE_NAMESPACE=openshift-monitoring export SERVICE_NAME=prometheus-k8s export SERVICE_PORT=9091 export SERVICE_PATH=\"api/v1/query?query=machine_cpu_sockets\" curl --cacert hub-ca.crt USDNEW_SERVER/api/v1/namespaces/USDSERVICE_NAMESPACE/services/USDSERVICE_NAME:USDSERVICE_PORT/proxy-service/USDSERVICE_PATH -H \"Authorization: Bearer USDPROMETHEUS_TOKEN\"",
"apiVersion: addon.open-cluster-management.io/v1alpha1 kind: AddOnDeploymentConfig metadata: name: <name> 1 namespace: <namespace> 2 spec: agentInstallNamespace: open-cluster-managment-agent-addon proxyConfig: httpsProxy: \"http://<username>:<password>@<ip>:<port>\" 3 noProxy: \".cluster.local,.svc,172.30.0.1\" 4 caBundle: <value> 5",
"apiVersion: addon.open-cluster-management.io/v1alpha1 kind: ManagedClusterAddOn metadata: name: cluster-proxy namespace: <namespace> 1 spec: installNamespace: open-cluster-managment-addon configs: group: addon.open-cluster-management.io resource: AddonDeploymentConfig name: <name> 2 namespace: <namespace> 3",
"operation: retryPosthook: installPosthook",
"operation: retryPosthook: upgradePosthook",
"apiVersion: cluster.open-cluster-management.io/v1beta1 kind: ClusterCurator metadata: name: test-inno namespace: test-inno spec: desiredCuration: upgrade destroy: {} install: {} scale: {} upgrade: channel: stable-4.x desiredUpdate: 4.x.1 monitorTimeout: 150 posthook: - extra_vars: {} clusterName: test-inno type: post_check name: ACM Upgrade Checks prehook: - extra_vars: {} clusterName: test-inno type: pre_check name: ACM Upgrade Checks towerAuthSecret: awx inventory: Demo Inventory",
"apiVersion: cluster.open-cluster-management.io/v1beta1 kind: ClusterCurator metadata: name: cluster1 {{{} namespace: cluster1 labels: test1: test1 test2: test2 {}}}spec: desiredCuration: install install: jobMonitorTimeout: 5 posthook: - extra_vars: {} name: Demo Job Template type: Job prehook: - extra_vars: {} name: Demo Job Template type: Job towerAuthSecret: toweraccess",
"spec: desiredCuration: upgrade upgrade: intermediateUpdate: 4.13.x desiredUpdate: 4.14.x monitorTimeout: 120",
"posthook: - extra_vars: {} name: Unpause machinepool type: Job prehook: - extra_vars: {} name: Pause machinepool type: Job",
"apiVersion: cluster.open-cluster-management.io/v1beta1 kind: ClusterCurator metadata: annotations: cluster.open-cluster-management.io/upgrade-clusterversion-backoff-limit: \"10\" name: your-name namespace: your-namespace spec: desiredCuration: upgrade upgrade: intermediateUpdate: 4.13.x desiredUpdate: 4.14.x monitorTimeout: 120 posthook: - extra_vars: {} name: Unpause machinepool type: Job prehook: - extra_vars: {} name: Pause machinepool type: Job",
"apiVersion: hypershift.openshift.io/v1beta1 kind: HostedCluster metadata: name: my-cluster namespace: clusters spec: pausedUntil: 'true'",
"apiVersion: hypershift.openshift.io/v1beta1 kind: NodePool metadata: name: my-cluster-us-east-2 namespace: clusters spec: pausedUntil: 'true'",
"apiVersion: cluster.open-cluster-management.io/v1beta1 kind: ClusterCurator metadata: name: my-cluster namespace: clusters labels: open-cluster-management: curator spec: desiredCuration: install install: jobMonitorTimeout: 5 prehook: - name: Demo Job Template extra_vars: variable1: something-interesting variable2: 2 - name: Demo Job Template posthook: - name: Demo Job Template towerAuthSecret: toweraccess",
"apiVersion: v1 kind: Secret metadata: name: toweraccess namespace: clusters stringData: host: https://my-tower-domain.io token: ANSIBLE_TOKEN_FOR_admin",
"apiVersion: cluster.open-cluster-management.io/v1beta1 kind: ClusterCurator metadata: name: my-cluster namespace: clusters labels: open-cluster-management: curator spec: desiredCuration: upgrade upgrade: desiredUpdate: 4.14.1 1 monitorTimeout: 120 prehook: - name: Demo Job Template extra_vars: variable1: something-interesting variable2: 2 - name: Demo Job Template posthook: - name: Demo Job Template towerAuthSecret: toweraccess",
"apiVersion: cluster.open-cluster-management.io/v1beta1 kind: ClusterCurator metadata: name: my-cluster namespace: clusters labels: open-cluster-management: curator spec: desiredCuration: destroy destroy: jobMonitorTimeout: 5 prehook: - name: Demo Job Template extra_vars: variable1: something-interesting variable2: 2 - name: Demo Job Template posthook: - name: Demo Job Template towerAuthSecret: toweraccess",
"apiVersion: cluster.open-cluster-management.io/v1alpha1 kind: ClusterClaim metadata: name: id.openshift.io spec: value: 95f91f25-d7a2-4fc3-9237-2ef633d8451c",
"apiVersion: cluster.open-cluster-management.io/v1 kind: ManagedCluster metadata: labels: cloud: Amazon clusterID: 95f91f25-d7a2-4fc3-9237-2ef633d8451c installer.name: multiclusterhub installer.namespace: open-cluster-management name: cluster1 vendor: OpenShift name: cluster1 spec: hubAcceptsClient: true leaseDurationSeconds: 60 status: allocatable: cpu: '15' memory: 65257Mi capacity: cpu: '18' memory: 72001Mi clusterClaims: - name: id.k8s.io value: cluster1 - name: kubeversion.open-cluster-management.io value: v1.18.3+6c42de8 - name: platform.open-cluster-management.io value: AWS - name: product.open-cluster-management.io value: OpenShift - name: id.openshift.io value: 95f91f25-d7a2-4fc3-9237-2ef633d8451c - name: consoleurl.openshift.io value: 'https://console-openshift-console.apps.xxxx.dev04.red-chesterfield.com' - name: version.openshift.io value: '4.x' conditions: - lastTransitionTime: '2020-10-26T07:08:49Z' message: Accepted by hub cluster admin reason: HubClusterAdminAccepted status: 'True' type: HubAcceptedManagedCluster - lastTransitionTime: '2020-10-26T07:09:18Z' message: Managed cluster joined reason: ManagedClusterJoined status: 'True' type: ManagedClusterJoined - lastTransitionTime: '2020-10-30T07:20:20Z' message: Managed cluster is available reason: ManagedClusterAvailable status: 'True' type: ManagedClusterConditionAvailable version: kubernetes: v1.18.3+6c42de8",
"apiVersion: cluster.open-cluster-management.io/v1alpha1 kind: ClusterClaim metadata: name: <custom_claim_name> spec: value: <custom_claim_value>",
"get clusterclaims.cluster.open-cluster-management.io",
"apiVersion: cluster.open-cluster-management.io/v1beta2 kind: ManagedClusterSetBinding metadata: name: global namespace: open-cluster-management-global-set spec: clusterSet: global",
"apiVersion: cluster.open-cluster-management.io/v1beta2 kind: ManagedClusterSet metadata: name: <cluster_set>",
"kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: name: clusterrole1 rules: - apiGroups: [\"cluster.open-cluster-management.io\"] resources: [\"managedclustersets/join\"] resourceNames: [\"<cluster_set>\"] verbs: [\"create\"]",
"apiVersion: cluster.open-cluster-management.io/v1 kind: ManagedCluster metadata: name: <cluster_name> spec: hubAcceptsClient: true",
"apiVersion: cluster.open-cluster-management.io/v1 kind: ManagedCluster metadata: name: <cluster_name> labels: cluster.open-cluster-management.io/clusterset: <cluster_set> spec: hubAcceptsClient: true",
"apiVersion: cluster.open-cluster-management.io/v1beta2 kind: ManagedClusterSetBinding metadata: namespace: <namespace> name: <cluster_set> spec: clusterSet: <cluster_set>",
"apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: <clusterrole> rules: - apiGroups: [\"cluster.open-cluster-management.io\"] resources: [\"managedclustersets/bind\"] resourceNames: [\"<cluster_set>\"] verbs: [\"create\"]",
"patch managedcluster <managed_cluster_name> -p '{\"spec\":{\"taints\":[{\"key\": \"key\", \"value\": \"value\", \"effect\": \"NoSelect\"}]}}' --type=merge",
"patch managedcluster <managed_cluster_name> --type='json' -p='[{\"op\": \"add\", \"path\": \"/spec/taints/-\", \"value\": {\"key\": \"key\", \"value\": \"value\", \"effect\": \"NoSelect\"}}]'",
"apiVersion: cluster.open-cluster-management.io/v1 kind: ManagedCluster metadata: name: cluster1 spec: hubAcceptsClient: true taints: - effect: NoSelect key: cluster.open-cluster-management.io/unavailable timeAdded: '2022-02-21T08:11:54Z'",
"apiVersion: cluster.open-cluster-management.io/v1 kind: ManagedCluster metadata: name: cluster1 spec: hubAcceptsClient: true taints: - effect: NoSelect key: cluster.open-cluster-management.io/unreachable timeAdded: '2022-02-21T08:11:06Z'",
"apiVersion: cluster.open-cluster-management.io/v1 kind: ManagedCluster metadata: name: cluster1 spec: hubAcceptsClient: true taints: - effect: NoSelect key: gpu value: \"true\" timeAdded: '2022-02-21T08:11:06Z'",
"apiVersion: cluster.open-cluster-management.io/v1beta1 kind: Placement metadata: name: placement1 namespace: default spec: tolerations: - key: gpu value: \"true\" operator: Equal",
"apiVersion: cluster.open-cluster-management.io/v1 kind: ManagedCluster metadata: name: cluster1 spec: hubAcceptsClient: true taints: - effect: NoSelect key: cluster.open-cluster-management.io/unreachable timeAdded: '2022-02-21T08:11:06Z'",
"apiVersion: cluster.open-cluster-management.io/v1beta1 kind: Placement metadata: name: demo4 namespace: demo1 spec: tolerations: - key: cluster.open-cluster-management.io/unreachable operator: Exists tolerationSeconds: 300",
"get managedclusters -l cluster.open-cluster-management.io/clusterset=<cluster_set>",
"labels: cluster.open-cluster-management.io/clusterset: clusterset1",
"apiVersion: cluster.open-cluster-management.io/v1beta1 kind: Placement metadata: name: placement namespace: ns1 spec: predicates: - requiredClusterSelector: labelSelector: matchLabels: vendor: OpenShift",
"apiVersion: cluster.open-cluster-management.io/v1beta1 kind: Placement metadata: name: placement namespace: ns1 spec: predicates: - requiredClusterSelector: claimSelector: matchExpressions: - key: region.open-cluster-management.io operator: In values: - us-west-1",
"apiVersion: cluster.open-cluster-management.io/v1beta1 kind: Placement metadata: name: placement namespace: ns1 spec: clusterSets: - clusterset1 - clusterset2 predicates: - requiredClusterSelector: claimSelector: matchExpressions: - key: region.open-cluster-management.io operator: In values: - us-west-1",
"apiVersion: cluster.open-cluster-management.io/v1beta1 kind: Placement metadata: name: placement namespace: ns1 spec: numberOfClusters: 3 1 predicates: - requiredClusterSelector: labelSelector: matchLabels: vendor: OpenShift claimSelector: matchExpressions: - key: region.open-cluster-management.io operator: In values: - us-west-1",
"apiVersion: cluster.open-cluster-management.io/v1 kind: ManagedCluster metadata: name: cluster1 spec: hubAcceptsClient: true taints: - effect: NoSelect key: gpu value: \"true\" timeAdded: '2022-02-21T08:11:06Z'",
"apiVersion: cluster.open-cluster-management.io/v1beta1 kind: Placement metadata: name: placement namespace: ns1 spec: tolerations: - key: gpu value: \"true\" operator: Equal",
"apiVersion: cluster.open-cluster-management.io/v1 kind: ManagedCluster metadata: name: cluster1 spec: hubAcceptsClient: true taints: - effect: NoSelect key: cluster.open-cluster-management.io/unreachable timeAdded: '2022-02-21T08:11:06Z'",
"apiVersion: cluster.open-cluster-management.io/v1beta1 kind: Placement metadata: name: placement namespace: ns1 spec: tolerations: - key: cluster.open-cluster-management.io/unreachable operator: Exists tolerationSeconds: 300 1",
"apiVersion: cluster.open-cluster-management.io/v1beta1 kind: Placement metadata: name: placement namespace: ns1 spec: numberOfClusters: 1 prioritizerPolicy: configurations: - scoreCoordinate: builtIn: ResourceAllocatableMemory",
"apiVersion: cluster.open-cluster-management.io/v1beta1 kind: Placement metadata: name: placement namespace: ns1 spec: numberOfClusters: 1 prioritizerPolicy: configurations: - scoreCoordinate: builtIn: ResourceAllocatableCPU weight: 2 - scoreCoordinate: builtIn: ResourceAllocatableMemory weight: 2",
"apiVersion: cluster.open-cluster-management.io/v1beta1 kind: Placement metadata: name: placement namespace: ns1 spec: numberOfClusters: 2 prioritizerPolicy: mode: Exact configurations: - scoreCoordinate: builtIn: Steady weight: 3 - scoreCoordinate: type: AddOn addOn: resourceName: default scoreName: cpuratio",
"feature.open-cluster-management.io/addon-application-manager: available",
"apiVersion: cluster.open-cluster-management.io/v1beta1 kind: Placement metadata: name: placement1 namespace: ns1 spec: predicates: - requiredClusterSelector: labelSelector: matchExpressions: - key: feature.open-cluster-management.io/addon-application-manager operator: Exists",
"apiVersion: cluster.open-cluster-management.io/v1beta1 kind: Placement metadata: name: placement2 namespace: ns1 spec: predicates: - requiredClusterSelector: labelSelector: matchLabels: \"feature.open-cluster-management.io/addon-application-manager\": \"available\"",
"apiVersion: cluster.open-cluster-management.io/v1beta1 kind: Placement metadata: name: placement3 namespace: ns1 spec: predicates: - requiredClusterSelector: labelSelector: matchExpressions: - key: feature.open-cluster-management.io/addon-application-manager operator: DoesNotExist",
"apiVersion: cluster.open-cluster-management.io/v1beta1 kind: PlacementDecision metadata: labels: cluster.open-cluster-management.io/placement: placement1 name: placement1-kbc7q namespace: ns1 ownerReferences: - apiVersion: cluster.open-cluster-management.io/v1beta1 blockOwnerDeletion: true controller: true kind: Placement name: placement1 uid: 05441cf6-2543-4ecc-8389-1079b42fe63e status: decisions: - clusterName: cluster1 reason: '' - clusterName: cluster2 reason: '' - clusterName: cluster3 reason: ''",
"kind: ClusterClaim metadata: annotations: cluster.open-cluster-management.io/createmanagedcluster: \"false\" 1",
"apiVersion: addon.open-cluster-management.io/v1alpha1 kind: ManagedClusterAddOn metadata: name: managed-serviceaccount namespace: <target_managed_cluster> spec: installNamespace: open-cluster-management-agent-addon",
"apply -f -",
"apiVersion: authentication.open-cluster-management.io/v1alpha1 kind: ManagedServiceAccount metadata: name: <managedserviceaccount_name> namespace: <target_managed_cluster> spec: rotation: {}",
"get managedserviceaccount <managed_serviceaccount_name> -n <target_managed_cluster> -o yaml",
"get secret <managed_serviceaccount_name> -n <target_managed_cluster> -o yaml",
"apiVersion: config.openshift.io/v1 kind: APIServer metadata: name: cluster spec: audit: profile: Default servingCerts: namedCertificates: - names: - api.mycluster.example.com servingCertificate: name: old-cert-secret",
"cp old.crt combined.crt",
"cat new.crt >> combined.crt",
"create secret tls combined-certs-secret --cert=combined.crt --key=old.key -n openshift-config",
"apiVersion: config.openshift.io/v1 kind: APIServer metadata: name: cluster spec: audit: profile: Default servingCerts: namedCertificates: - names: - api.mycluster.example.com servingCertificate: name: combined-cert-secret",
"create secret tls new-cert-secret --cert=new.crt --key=new.key -n openshift-config {code}",
"apiVersion: config.openshift.io/v1 kind: APIServer metadata: name: cluster spec: audit: profile: Default servingCerts: namedCertificates: - names: - api.mycluster.example.com servingCertificate: name: new-cert-secret",
"apiVersion: config.open-cluster-management.io/v1alpha1 kind: KlusterletConfig metadata: name: http-proxy spec: hubKubeAPIServerProxyConfig: httpProxy: \"http://<username>:<password>@<ip>:<port>\"",
"apiVersion: config.open-cluster-management.io/v1alpha1 kind: KlusterletConfig metadata: name: https-proxy spec: hubKubeAPIServerProxyConfig: httpsProxy: \"https://<username>:<password>@<ip>:<port>\" caBundle: <user-ca-bundle>",
"apiVersion: cluster.open-cluster-management.io/v1 kind: ManagedCluster metadata: annotations: agent.open-cluster-management.io/klusterlet-config: <klusterlet-config-name> name:<managed-cluster-name> spec: hubAcceptsClient: true leaseDurationSeconds: 60",
"apiVersion: cluster.open-cluster-management.io/v1 kind: ManagedCluster metadata: annotations: open-cluster-management/nodeSelector: '{\"dedicated\":\"acm\"}' open-cluster-management/tolerations: '[{\"key\":\"dedicated\",\"operator\":\"Equal\",\"value\":\"acm\",\"effect\":\"NoSchedule\"}]'",
"apiVersion: config.open-cluster-management.io/v1alpha1 kind: KlusterletConfig metadata: name: 1 spec: hubKubeAPIServerURL: \"https://api.example.com:6443\" 2 hubKubeAPIServerCABundle: \"LS0tLS1CRU...LS0tCg==\" 3",
"apiVersion: cluster.open-cluster-management.io/v1 kind: ManagedCluster metadata: annotations: agent.open-cluster-management.io/klusterlet-config: 1 name: 2 spec: hubAcceptsClient: true leaseDurationSeconds: 60",
"apiVersion: config.open-cluster-management.io/v1alpha1 kind: KlusterletConfig metadata: name: test spec: hubKubeAPIServerURL: \"example.test.com\" - apiVersion: config.open-cluster-management.io/v1alpha1 kind: KlusterletConfig metadata: name: global spec: hubKubeAPIServerURL: \"example.global.com\"",
"apiVersion: config.open-cluster-management.io/v1alpha1 kind: KlusterletConfig metadata: name: test spec: hubKubeAPIServerURL: \"\" - apiVersion: config.open-cluster-management.io/v1alpha1 kind: KlusterletConfig metadata: name: global spec: hubKubeAPIServerURL: \"example.global.com\"",
"apiVersion: config.open-cluster-management.io/v1alpha1 kind: KlusterletConfig metadata: name: test - apiVersion: config.open-cluster-management.io/v1alpha1 kind: KlusterletConfig metadata: name: global spec: hubKubeAPIServerURL: \"example.global.com\"",
"delete po -n open-cluster-management `oc get pod -n open-cluster-management | grep multiclusterhub-operator| cut -d' ' -f1`",
"delete managedcluster USDCLUSTER_NAME",
"delete clusterdeployment <CLUSTER_NAME> -n USDCLUSTER_NAME",
"delete po -n open-cluster-management `oc get pod -n open-cluster-management | grep multiclusterhub-operator| cut -d' ' -f1`",
"open-cluster-management-agent Active 10m open-cluster-management-agent-addon Active 10m",
"get klusterlet | grep klusterlet | awk '{print USD1}' | xargs oc patch klusterlet --type=merge -p '{\"metadata\":{\"finalizers\": []}}'",
"delete namespaces open-cluster-management-agent open-cluster-management-agent-addon --wait=false get crds | grep open-cluster-management.io | awk '{print USD1}' | xargs oc delete crds --wait=false get crds | grep open-cluster-management.io | awk '{print USD1}' | xargs oc patch crds --type=merge -p '{\"metadata\":{\"finalizers\": []}}'",
"get crds | grep open-cluster-management.io | awk '{print USD1}' get ns | grep open-cluster-management-agent",
"oc rsh -n openshift-etcd etcd-control-plane-0.example.com etcdctl endpoint status --cluster -w table",
"sh-4.4#etcdctl compact USD(etcdctl endpoint status --write-out=\"json\" | egrep -o '\"revision\":[0-9]*' | egrep -o '[0-9]*' -m1)",
"compacted revision 158774421",
"apiVersion: discovery.open-cluster-management.io/v1 kind: DiscoveryConfig metadata: name: discovery namespace: <NAMESPACE_NAME> spec: credential: <SECRET_NAME> filters: lastActive: 7 openshiftVersions: - \"4.14\"",
"apiVersion: discovery.open-cluster-management.io/v1 kind: DiscoveredCluster metadata: name: fd51aafa-95a8-41f7-a992-6fb95eed3c8e namespace: <NAMESPACE_NAME> spec: activity_timestamp: \"2021-04-19T21:06:14Z\" cloudProvider: vsphere console: https://console-openshift-console.apps.qe1-vmware-pkt.dev02.red-chesterfield.com creation_timestamp: \"2021-04-19T16:29:53Z\" credential: apiVersion: v1 kind: Secret name: <SECRET_NAME> namespace: <NAMESPACE_NAME> display_name: qe1-vmware-pkt.dev02.red-chesterfield.com name: fd51aafa-95a8-41f7-a992-6fb95eed3c8e openshiftVersion: 4.14 status: Stale",
"apiVersion: discovery.open-cluster-management.io/v1 kind: DiscoveredCluster metadata: name: 28c17977-fc73-4050-b5cc-a5aa2d1d6892 namespace: discovery spec: openshiftVersion: <4.x.z> isManagedCluster: false cloudProvider: aws name: 28c17977-fc73-4050-b5cc-a5aa2d1d6892 displayName: rosa-dc status: Active importAsManagedCluster: true 1 type: <supported-type> 2",
"apiVersion: discovery.open-cluster-management.io/v1 kind: DiscoveredCluster metadata: annotations: discovery.open-cluster-management.io/previously-auto-imported: 'true'",
"2024-06-12T14:11:43.366Z INFO reconcile Skipped automatic import for DiscoveredCluster due to existing 'discovery.open-cluster-management.io/previously-auto-imported' annotation {\"Name\": \"rosa-dc\"}",
"patch discoveredcluster <name> -n <namespace> --type='json' -p='[{\"op\": \"replace\", \"path\": \"/spec/importAsManagedCluster\", \"value\": true}]'",
"get managedcluster <name>",
"rosa describe cluster --cluster=<cluster-name> | grep -o '^ID:.*",
"create -f <your-config-map-file.yaml>",
"kind: ConfigMap apiVersion: v1 metadata: name: hcp-sizing-baseline namespace: local-cluster data: incrementalCPUUsagePer1KQPS: \"9.0\" memoryRequestPerHCP: \"18\" minimumQPSPerHCP: \"50.0\"",
"delete deployment hypershift-addon-agent -n open-cluster-management-agent-addon",
"logs hypershift-addon-agent -n open-cluster-management-agent-addon",
"2024-01-05T19:41:05.392Z INFO agent.agent-reconciler agent/agent.go:793 setting cpuRequestPerHCP to 5 2024-01-05T19:41:05.392Z INFO agent.agent-reconciler agent/agent.go:802 setting memoryRequestPerHCP to 18 2024-01-05T19:53:54.070Z INFO agent.agent-reconciler agent/hcp_capacity_calculation.go:141 The worker nodes have 12.000000 vCPUs 2024-01-05T19:53:54.070Z INFO agent.agent-reconciler agent/hcp_capacity_calculation.go:142 The worker nodes have 49.173369 GB memory",
"2024-01-05T19:53:54.052Z ERROR agent.agent-reconciler agent/agent.go:788 failed to get configmap from the hub. Setting the HCP sizing baseline with default values. {\"error\": \"configmaps \\\"hcp-sizing-baseline\\\" not found\"}",
"login",
"edit addondeploymentconfig hypershift-addon-deploy-config -n multicluster-engine",
"apiVersion: addon.open-cluster-management.io/v1alpha1 kind: AddOnDeploymentConfig metadata: name: hypershift-addon-deploy-config namespace: multicluster-engine spec: customizedVariables: - name: hcMaxNumber value: \"80\" - name: hcThresholdNumber value: \"60\" - name: disableMetrics value: \"true\"",
"tar xvzf hcp.tar.gz",
"chmod +x hcp",
"sudo mv hcp /usr/local/bin/.",
"hcp create cluster <platform> --help 1",
"get ConsoleCLIDownload hcp-cli-download -o json | jq -r \".spec\"",
"wget <hcp_cli_download_url> 1",
"tar xvzf hcp.tar.gz",
"chmod +x hcp",
"sudo mv hcp /usr/local/bin/.",
"tar xvzf hcp.tar.gz",
"chmod +x hcp",
"sudo mv hcp /usr/local/bin/.",
"hcp create cluster <platform> --help 1",
"label node/worker-1a node/worker-1b topology.kubernetes.io/zone=rack1 label node/worker-2a node/worker-2b topology.kubernetes.io/zone=rack2",
"spec: nodeSelector: role.kubernetes.io/infra: \"\"",
"get managedclusters local-cluster",
"aws s3api create-bucket --bucket <your-bucket-name> aws s3api delete-public-access-block --bucket <your-bucket-name> echo '{ \"Version\": \"2012-10-17\", \"Statement\": [ { \"Effect\": \"Allow\", \"Principal\": \"*\", \"Action\": \"s3:GetObject\", \"Resource\": \"arn:aws:s3:::<your-bucket-name>/*\" } ] }' | envsubst > policy.json aws s3api put-bucket-policy --bucket <your-bucket-name> --policy file://policy.json",
"aws s3api create-bucket --bucket <your-bucket-name> --create-bucket-configuration LocationConstraint=<region> --region <region> aws s3api delete-public-access-block --bucket <your-bucket-name> echo '{ \"Version\": \"2012-10-17\", \"Statement\": [ { \"Effect\": \"Allow\", \"Principal\": \"*\", \"Action\": \"s3:GetObject\", \"Resource\": \"arn:aws:s3:::<your-bucket-name>/*\" } ] }' | envsubst > policy.json aws s3api put-bucket-policy --bucket <your-bucket-name> --policy file://policy.json",
"create secret generic hypershift-operator-oidc-provider-s3-credentials --from-file=credentials=<path>/.aws/credentials --from-literal=bucket=<s3-bucket-for-hypershift> --from-literal=region=<region> -n local-cluster",
"label secret hypershift-operator-oidc-provider-s3-credentials -n local-cluster cluster.open-cluster-management.io/backup=true",
"aws route53 create-hosted-zone --name <your-basedomain> --caller-reference USD(whoami)-USD(date --rfc-3339=date)",
"aws sts get-caller-identity --query \"Arn\" --output text",
"arn:aws:iam::1234567890:user/<aws-username>",
"{ \"Version\": \"2012-10-17\", \"Statement\": [ { \"Effect\": \"Allow\", \"Principal\": { \"AWS\": \"<arn>\" }, \"Action\": \"sts:AssumeRole\" } ] }",
"aws iam create-role --role-name <hcp-cli-role> --assume-role-policy-document file://trust-relationship.json --query \"Role.Arn\" --output json",
"arn:aws:iam::820196288204:role/myrole",
"{ \"Version\": \"2012-10-17\", \"Statement\": [ { \"Sid\": \"EC2\", \"Effect\": \"Allow\", \"Action\": [ \"ec2:CreateDhcpOptions\", \"ec2:DeleteSubnet\", \"ec2:ReplaceRouteTableAssociation\", \"ec2:DescribeAddresses\", \"ec2:DescribeInstances\", \"ec2:DeleteVpcEndpoints\", \"ec2:CreateNatGateway\", \"ec2:CreateVpc\", \"ec2:DescribeDhcpOptions\", \"ec2:AttachInternetGateway\", \"ec2:DeleteVpcEndpointServiceConfigurations\", \"ec2:DeleteRouteTable\", \"ec2:AssociateRouteTable\", \"ec2:DescribeInternetGateways\", \"ec2:DescribeAvailabilityZones\", \"ec2:CreateRoute\", \"ec2:CreateInternetGateway\", \"ec2:RevokeSecurityGroupEgress\", \"ec2:ModifyVpcAttribute\", \"ec2:DeleteInternetGateway\", \"ec2:DescribeVpcEndpointConnections\", \"ec2:RejectVpcEndpointConnections\", \"ec2:DescribeRouteTables\", \"ec2:ReleaseAddress\", \"ec2:AssociateDhcpOptions\", \"ec2:TerminateInstances\", \"ec2:CreateTags\", \"ec2:DeleteRoute\", \"ec2:CreateRouteTable\", \"ec2:DetachInternetGateway\", \"ec2:DescribeVpcEndpointServiceConfigurations\", \"ec2:DescribeNatGateways\", \"ec2:DisassociateRouteTable\", \"ec2:AllocateAddress\", \"ec2:DescribeSecurityGroups\", \"ec2:RevokeSecurityGroupIngress\", \"ec2:CreateVpcEndpoint\", \"ec2:DescribeVpcs\", \"ec2:DeleteSecurityGroup\", \"ec2:DeleteDhcpOptions\", \"ec2:DeleteNatGateway\", \"ec2:DescribeVpcEndpoints\", \"ec2:DeleteVpc\", \"ec2:CreateSubnet\", \"ec2:DescribeSubnets\" ], \"Resource\": \"*\" }, { \"Sid\": \"ELB\", \"Effect\": \"Allow\", \"Action\": [ \"elasticloadbalancing:DeleteLoadBalancer\", \"elasticloadbalancing:DescribeLoadBalancers\", \"elasticloadbalancing:DescribeTargetGroups\", \"elasticloadbalancing:DeleteTargetGroup\" ], \"Resource\": \"*\" }, { \"Sid\": \"IAMPassRole\", \"Effect\": \"Allow\", \"Action\": \"iam:PassRole\", \"Resource\": \"arn:*:iam::*:role/*-worker-role\", \"Condition\": { \"ForAnyValue:StringEqualsIfExists\": { \"iam:PassedToService\": \"ec2.amazonaws.com\" } } }, { \"Sid\": \"IAM\", \"Effect\": \"Allow\", \"Action\": [ \"iam:CreateInstanceProfile\", \"iam:DeleteInstanceProfile\", \"iam:GetRole\", \"iam:UpdateAssumeRolePolicy\", \"iam:GetInstanceProfile\", \"iam:TagRole\", \"iam:RemoveRoleFromInstanceProfile\", \"iam:CreateRole\", \"iam:DeleteRole\", \"iam:PutRolePolicy\", \"iam:AddRoleToInstanceProfile\", \"iam:CreateOpenIDConnectProvider\", \"iam:TagOpenIDConnectProvider\", \"iam:ListOpenIDConnectProviders\", \"iam:DeleteRolePolicy\", \"iam:UpdateRole\", \"iam:DeleteOpenIDConnectProvider\", \"iam:GetRolePolicy\" ], \"Resource\": \"*\" }, { \"Sid\": \"Route53\", \"Effect\": \"Allow\", \"Action\": [ \"route53:ListHostedZonesByVPC\", \"route53:CreateHostedZone\", \"route53:ListHostedZones\", \"route53:ChangeResourceRecordSets\", \"route53:ListResourceRecordSets\", \"route53:DeleteHostedZone\", \"route53:AssociateVPCWithHostedZone\", \"route53:ListHostedZonesByName\" ], \"Resource\": \"*\" }, { \"Sid\": \"S3\", \"Effect\": \"Allow\", \"Action\": [ \"s3:ListAllMyBuckets\", \"s3:ListBucket\", \"s3:DeleteObject\", \"s3:DeleteBucket\" ], \"Resource\": \"*\" } ] }",
"aws iam put-role-policy --role-name <role_name> \\ 1 --policy-name <policy_name> \\ 2 --policy-document file://policy.json 3",
"aws sts get-session-token --output json > sts-creds.json",
"{ \"Credentials\": { \"AccessKeyId\": \"ASIA1443CE0GN2ATHWJU\", \"SecretAccessKey\": \"XFLN7cZ5AP0d66KhyI4gd8Mu0UCQEDN9cfelW1\", \"SessionToken\": \"IQoJb3JpZ2luX2VjEEAaCXVzLWVhc3QtMiJHMEUCIDyipkM7oPKBHiGeI0pMnXst1gDLfs/TvfskXseKCbshAiEAnl1l/Html7Iq9AEIqf//////////KQburfkq4A3TuppHMr/9j1TgCj1z83SO261bHqlJUazKoy7vBFR/a6LHt55iMBqtKPEsIWjBgj/jSdRJI3j4Gyk1//luKDytcfF/tb9YrxDTPLrACS1lqAxSIFZ82I/jDhbDs=\", \"Expiration\": \"2025-05-16T04:19:32+00:00\" } }",
"create secret generic hypershift-operator-external-dns-credentials --from-literal=provider=aws --from-literal=domain-filter=<domain_name> --from-file=credentials=<path_to_aws_credentials_file> -n local-cluster",
"label secret hypershift-operator-external-dns-credentials -n local-cluster cluster.open-cluster-management.io/backup=\"\"",
"dig +short test.user-dest-public.aws.kerberos.com 192.168.1.1",
"hcp create cluster aws --name=<hosted_cluster_name> --endpoint-access=PublicAndPrivate --external-dns-domain=<public_hosted_zone>",
"platform: aws: endpointAccess: PublicAndPrivate services: - service: APIServer servicePublishingStrategy: route: hostname: api-example.service-provider-domain.com type: Route - service: OAuthServer servicePublishingStrategy: route: hostname: oauth-example.service-provider-domain.com type: Route - service: Konnectivity servicePublishingStrategy: type: Route - service: Ignition servicePublishingStrategy: type: Route",
"export KUBECONFIG=<path_to_management_cluster_kubeconfig>",
"get pod -n hypershift -lapp=external-dns",
"NAME READY STATUS RESTARTS AGE external-dns-7c89788c69-rn8gp 1/1 Running 0 40s",
"hcp create cluster aws --role-arn <arn_role> \\ 1 --instance-type <instance_type> \\ 2 --region <region> \\ 3 --auto-repair --generate-ssh --name <hosted_cluster_name> \\ 4 --namespace clusters --base-domain <service_consumer_domain> \\ 5 --node-pool-replicas <node_replica_count> \\ 6 --pull-secret <path_to_your_pull_secret> \\ 7 --release-image quay.io/openshift-release-dev/ocp-release:<ocp_release_image> \\ 8 --external-dns-domain=<service_provider_domain> \\ 9 --endpoint-access=PublicAndPrivate 10 --sts-creds <path_to_sts_credential_file> 11",
"create secret generic hypershift-operator-private-link-credentials --from-literal=aws-access-key-id=<aws-access-key-id> --from-literal=aws-secret-access-key=<aws-secret-access-key> --from-literal=region=<region> -n local-cluster",
"label secret hypershift-operator-private-link-credentials -n local-cluster cluster.open-cluster-management.io/backup=\"\"",
"hcp create cluster aws --name <hosted_cluster_name> \\ 1 --infra-id <infra_id> \\ 2 --base-domain <basedomain> \\ 3 --sts-creds <path_to_sts_credential_file> \\ 4 --pull-secret <path_to_pull_secret> \\ 5 --region <region> \\ 6 --generate-ssh --node-pool-replicas <node_pool_replica_count> \\ 7 --namespace <hosted_cluster_namespace> \\ 8 --role-arn <role_name> 9",
"get hostedclusters -n <hosted_cluster_namespace>",
"get nodepools --namespace <hosted_cluster_namespace>",
"hcp create cluster aws --name <hosted_cluster_name> \\ 1 --node-pool-replicas=<node_pool_replica_count> \\ 2 --base-domain <basedomain> \\ 3 --pull-secret <path_to_pull_secret> \\ 4 --role-arn <arn_role> \\ 5 --region <region> \\ 6 --zones <zones> 7 --sts-creds <path_to_sts_credential_file> 8",
"hcp create cluster aws --name <hosted_cluster_name> \\ 1 --node-pool-replicas <node_pool_replica_count> \\ 2 --base-domain <basedomain> \\ 3 --pull-secret <path_to_pull_secret> \\ 4 --sts-creds <path_to_sts_credential_file> \\ 5 --region <region> 6 --role-arn <arn_role> \\ 7",
"hcp create cluster aws --name <hosted_cluster_name> \\ 1 --node-pool-replicas <node_pool_replica_count> \\ 2 --base-domain <basedomain> \\ 3 --pull-secret <path_to_pull_secret> \\ 4 --sts-creds <path_to_sts_credential_file> \\ 5 --region <region> \\ 6 --release-image quay.io/openshift-release-dev/ocp-release:<ocp_release_image> \\ 7 --role-arn <role_name> \\ 8 --multi-arch 9",
"hcp create nodepool aws --cluster-name <hosted_cluster_name> \\ 1 --name <nodepool_name> \\ 2 --node-count <node_pool_replica_count> 3",
"hcp create nodepool aws --cluster-name <hosted_cluster_name> \\ 1 --name <node_pool_name> \\ 2 --node-count <node_pool_replica_count> 3 --arch <architecture> 4",
"--kubeconfig <hosted-cluster-name>.kubeconfig get nodes",
"hcp create kubeconfig --namespace <hosted-cluster-namespace> --name <hosted-cluster-name> > <hosted-cluster-name>.kubeconfig",
"--kubeconfig <hosted-cluster-name>.kubeconfig get nodes",
"hcp create cluster aws --name <hosted_cluster_name> \\ 1 --node-pool-replicas=<node_pool_replica_count> \\ 2 --base-domain <basedomain> \\ 3 --pull-secret <path_to_pull_secret> \\ 4 --sts-creds <path_to_sts_credential_file> \\ 5 --region <region> \\ 6 --endpoint-access Private 7 --role-arn <role_name> 8",
"aws ec2 describe-instances --filter=\"Name=tag:kubernetes.io/cluster/<infra_id>,Values=owned\" | jq '.Reservations[] | .Instances[] | select(.PublicDnsName==\"\") | .PrivateIpAddress'",
"hcp create kubeconfig > <hosted_cluster_kubeconfig>",
"ssh -o ProxyCommand=\"ssh ec2-user@<bastion_ip> -W %h:%p\" core@<node_ip>",
"mv <path_to_kubeconfig_file> <new_file_name>",
"export KUBECONFIG=<path_to_kubeconfig_file>",
"get clusteroperators clusterversion",
"endpointAccess: Public region: us-east-2 resourceTags: - key: kubernetes.io/cluster/example-cluster-bz4j5 value: owned rolesRef: controlPlaneOperatorARN: arn:aws:iam::820196288204:role/example-cluster-bz4j5-control-plane-operator imageRegistryARN: arn:aws:iam::820196288204:role/example-cluster-bz4j5-openshift-image-registry ingressARN: arn:aws:iam::820196288204:role/example-cluster-bz4j5-openshift-ingress kubeCloudControllerARN: arn:aws:iam::820196288204:role/example-cluster-bz4j5-cloud-controller networkARN: arn:aws:iam::820196288204:role/example-cluster-bz4j5-cloud-network-config-controller nodePoolManagementARN: arn:aws:iam::820196288204:role/example-cluster-bz4j5-node-pool storageARN: arn:aws:iam::820196288204:role/example-cluster-bz4j5-aws-ebs-csi-driver-controller type: AWS",
"{ \"Version\": \"2012-10-17\", \"Statement\": [ { \"Effect\": \"Allow\", \"Action\": [ \"elasticloadbalancing:DescribeLoadBalancers\", \"tag:GetResources\", \"route53:ListHostedZones\" ], \"Resource\": \"\\*\" }, { \"Effect\": \"Allow\", \"Action\": [ \"route53:ChangeResourceRecordSets\" ], \"Resource\": [ \"arn:aws:route53:::PUBLIC_ZONE_ID\", \"arn:aws:route53:::PRIVATE_ZONE_ID\" ] } ] }",
"{ \"Version\": \"2012-10-17\", \"Statement\": [ { \"Effect\": \"Allow\", \"Action\": [ \"s3:CreateBucket\", \"s3:DeleteBucket\", \"s3:PutBucketTagging\", \"s3:GetBucketTagging\", \"s3:PutBucketPublicAccessBlock\", \"s3:GetBucketPublicAccessBlock\", \"s3:PutEncryptionConfiguration\", \"s3:GetEncryptionConfiguration\", \"s3:PutLifecycleConfiguration\", \"s3:GetLifecycleConfiguration\", \"s3:GetBucketLocation\", \"s3:ListBucket\", \"s3:GetObject\", \"s3:PutObject\", \"s3:DeleteObject\", \"s3:ListBucketMultipartUploads\", \"s3:AbortMultipartUpload\", \"s3:ListMultipartUploadParts\" ], \"Resource\": \"\\*\" } ] }",
"{ \"Version\": \"2012-10-17\", \"Statement\": [ { \"Effect\": \"Allow\", \"Action\": [ \"ec2:AttachVolume\", \"ec2:CreateSnapshot\", \"ec2:CreateTags\", \"ec2:CreateVolume\", \"ec2:DeleteSnapshot\", \"ec2:DeleteTags\", \"ec2:DeleteVolume\", \"ec2:DescribeInstances\", \"ec2:DescribeSnapshots\", \"ec2:DescribeTags\", \"ec2:DescribeVolumes\", \"ec2:DescribeVolumesModifications\", \"ec2:DetachVolume\", \"ec2:ModifyVolume\" ], \"Resource\": \"\\*\" } ] }",
"{ \"Version\": \"2012-10-17\", \"Statement\": [ { \"Effect\": \"Allow\", \"Action\": [ \"ec2:DescribeInstances\", \"ec2:DescribeInstanceStatus\", \"ec2:DescribeInstanceTypes\", \"ec2:UnassignPrivateIpAddresses\", \"ec2:AssignPrivateIpAddresses\", \"ec2:UnassignIpv6Addresses\", \"ec2:AssignIpv6Addresses\", \"ec2:DescribeSubnets\", \"ec2:DescribeNetworkInterfaces\" ], \"Resource\": \"\\*\" } ] }",
"{ \"Version\": \"2012-10-17\", \"Statement\": [ { \"Action\": [ \"ec2:DescribeInstances\", \"ec2:DescribeImages\", \"ec2:DescribeRegions\", \"ec2:DescribeRouteTables\", \"ec2:DescribeSecurityGroups\", \"ec2:DescribeSubnets\", \"ec2:DescribeVolumes\", \"ec2:CreateSecurityGroup\", \"ec2:CreateTags\", \"ec2:CreateVolume\", \"ec2:ModifyInstanceAttribute\", \"ec2:ModifyVolume\", \"ec2:AttachVolume\", \"ec2:AuthorizeSecurityGroupIngress\", \"ec2:CreateRoute\", \"ec2:DeleteRoute\", \"ec2:DeleteSecurityGroup\", \"ec2:DeleteVolume\", \"ec2:DetachVolume\", \"ec2:RevokeSecurityGroupIngress\", \"ec2:DescribeVpcs\", \"elasticloadbalancing:AddTags\", \"elasticloadbalancing:AttachLoadBalancerToSubnets\", \"elasticloadbalancing:ApplySecurityGroupsToLoadBalancer\", \"elasticloadbalancing:CreateLoadBalancer\", \"elasticloadbalancing:CreateLoadBalancerPolicy\", \"elasticloadbalancing:CreateLoadBalancerListeners\", \"elasticloadbalancing:ConfigureHealthCheck\", \"elasticloadbalancing:DeleteLoadBalancer\", \"elasticloadbalancing:DeleteLoadBalancerListeners\", \"elasticloadbalancing:DescribeLoadBalancers\", \"elasticloadbalancing:DescribeLoadBalancerAttributes\", \"elasticloadbalancing:DetachLoadBalancerFromSubnets\", \"elasticloadbalancing:DeregisterInstancesFromLoadBalancer\", \"elasticloadbalancing:ModifyLoadBalancerAttributes\", \"elasticloadbalancing:RegisterInstancesWithLoadBalancer\", \"elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer\", \"elasticloadbalancing:AddTags\", \"elasticloadbalancing:CreateListener\", \"elasticloadbalancing:CreateTargetGroup\", \"elasticloadbalancing:DeleteListener\", \"elasticloadbalancing:DeleteTargetGroup\", \"elasticloadbalancing:DescribeListeners\", \"elasticloadbalancing:DescribeLoadBalancerPolicies\", \"elasticloadbalancing:DescribeTargetGroups\", \"elasticloadbalancing:DescribeTargetHealth\", \"elasticloadbalancing:ModifyListener\", \"elasticloadbalancing:ModifyTargetGroup\", \"elasticloadbalancing:RegisterTargets\", \"elasticloadbalancing:SetLoadBalancerPoliciesOfListener\", \"iam:CreateServiceLinkedRole\", \"kms:DescribeKey\" ], \"Resource\": [ \"\\*\" ], \"Effect\": \"Allow\" } ] }",
"{ \"Version\": \"2012-10-17\", \"Statement\": [ { \"Action\": [ \"ec2:AllocateAddress\", \"ec2:AssociateRouteTable\", \"ec2:AttachInternetGateway\", \"ec2:AuthorizeSecurityGroupIngress\", \"ec2:CreateInternetGateway\", \"ec2:CreateNatGateway\", \"ec2:CreateRoute\", \"ec2:CreateRouteTable\", \"ec2:CreateSecurityGroup\", \"ec2:CreateSubnet\", \"ec2:CreateTags\", \"ec2:DeleteInternetGateway\", \"ec2:DeleteNatGateway\", \"ec2:DeleteRouteTable\", \"ec2:DeleteSecurityGroup\", \"ec2:DeleteSubnet\", \"ec2:DeleteTags\", \"ec2:DescribeAccountAttributes\", \"ec2:DescribeAddresses\", \"ec2:DescribeAvailabilityZones\", \"ec2:DescribeImages\", \"ec2:DescribeInstances\", \"ec2:DescribeInternetGateways\", \"ec2:DescribeNatGateways\", \"ec2:DescribeNetworkInterfaces\", \"ec2:DescribeNetworkInterfaceAttribute\", \"ec2:DescribeRouteTables\", \"ec2:DescribeSecurityGroups\", \"ec2:DescribeSubnets\", \"ec2:DescribeVpcs\", \"ec2:DescribeVpcAttribute\", \"ec2:DescribeVolumes\", \"ec2:DetachInternetGateway\", \"ec2:DisassociateRouteTable\", \"ec2:DisassociateAddress\", \"ec2:ModifyInstanceAttribute\", \"ec2:ModifyNetworkInterfaceAttribute\", \"ec2:ModifySubnetAttribute\", \"ec2:ReleaseAddress\", \"ec2:RevokeSecurityGroupIngress\", \"ec2:RunInstances\", \"ec2:TerminateInstances\", \"tag:GetResources\", \"ec2:CreateLaunchTemplate\", \"ec2:CreateLaunchTemplateVersion\", \"ec2:DescribeLaunchTemplates\", \"ec2:DescribeLaunchTemplateVersions\", \"ec2:DeleteLaunchTemplate\", \"ec2:DeleteLaunchTemplateVersions\" ], \"Resource\": [ \"\\*\" ], \"Effect\": \"Allow\" }, { \"Condition\": { \"StringLike\": { \"iam:AWSServiceName\": \"elasticloadbalancing.amazonaws.com\" } }, \"Action\": [ \"iam:CreateServiceLinkedRole\" ], \"Resource\": [ \"arn:*:iam::*:role/aws-service-role/elasticloadbalancing.amazonaws.com/AWSServiceRoleForElasticLoadBalancing\" ], \"Effect\": \"Allow\" }, { \"Action\": [ \"iam:PassRole\" ], \"Resource\": [ \"arn:*:iam::*:role/*-worker-role\" ], \"Effect\": \"Allow\" } ] }",
"{ \"Version\": \"2012-10-17\", \"Statement\": [ { \"Effect\": \"Allow\", \"Action\": [ \"ec2:CreateVpcEndpoint\", \"ec2:DescribeVpcEndpoints\", \"ec2:ModifyVpcEndpoint\", \"ec2:DeleteVpcEndpoints\", \"ec2:CreateTags\", \"route53:ListHostedZones\" ], \"Resource\": \"\\*\" }, { \"Effect\": \"Allow\", \"Action\": [ \"route53:ChangeResourceRecordSets\", \"route53:ListResourceRecordSets\" ], \"Resource\": \"arn:aws:route53:::%s\" } ] }",
"--- apiVersion: v1 kind: Namespace metadata: creationTimestamp: null name: clusters spec: {} status: {} --- apiVersion: v1 data: .dockerconfigjson: xxxxxxxxxxx kind: Secret metadata: creationTimestamp: null labels: hypershift.openshift.io/safe-to-delete-with-cluster: \"true\" name: <pull_secret_name> 1 namespace: clusters --- apiVersion: v1 data: key: xxxxxxxxxxxxxxxxx kind: Secret metadata: creationTimestamp: null labels: hypershift.openshift.io/safe-to-delete-with-cluster: \"true\" name: <etcd_encryption_key_name> 2 namespace: clusters type: Opaque --- apiVersion: v1 data: id_rsa: xxxxxxxxx id_rsa.pub: xxxxxxxxx kind: Secret metadata: creationTimestamp: null labels: hypershift.openshift.io/safe-to-delete-with-cluster: \"true\" name: <ssh-key-name> 3 namespace: clusters --- apiVersion: hypershift.openshift.io/v1beta1 kind: HostedCluster metadata: creationTimestamp: null name: <hosted_cluster_name> 4 namespace: clusters spec: autoscaling: {} configuration: {} controllerAvailabilityPolicy: SingleReplica dns: baseDomain: <dns_domain> 5 privateZoneID: xxxxxxxx publicZoneID: xxxxxxxx etcd: managed: storage: persistentVolume: size: 8Gi storageClassName: gp3-csi type: PersistentVolume managementType: Managed fips: false infraID: <infra_id> 6 issuerURL: <issuer_url> 7 networking: clusterNetwork: - cidr: 10.132.0.0/14 machineNetwork: - cidr: 10.0.0.0/16 networkType: OVNKubernetes serviceNetwork: - cidr: 172.31.0.0/16 olmCatalogPlacement: management platform: aws: cloudProviderConfig: subnet: id: <subnet_xxx> 8 vpc: <vpc_xxx> 9 zone: us-west-1b endpointAccess: Public multiArch: false region: us-west-1 rolesRef: controlPlaneOperatorARN: arn:aws:iam::820196288204:role/<infra_id>-control-plane-operator imageRegistryARN: arn:aws:iam::820196288204:role/<infra_id>-openshift-image-registry ingressARN: arn:aws:iam::820196288204:role/<infra_id>-openshift-ingress kubeCloudControllerARN: arn:aws:iam::820196288204:role/<infra_id>-cloud-controller networkARN: arn:aws:iam::820196288204:role/<infra_id>-cloud-network-config-controller nodePoolManagementARN: arn:aws:iam::820196288204:role/<infra_id>-node-pool storageARN: arn:aws:iam::820196288204:role/<infra_id>-aws-ebs-csi-driver-controller type: AWS pullSecret: name: <pull_secret_name> release: image: quay.io/openshift-release-dev/ocp-release:4.16-x86_64 secretEncryption: aescbc: activeKey: name: <etcd_encryption_key_name> type: aescbc services: - service: APIServer servicePublishingStrategy: type: LoadBalancer - service: OAuthServer servicePublishingStrategy: type: Route - service: Konnectivity servicePublishingStrategy: type: Route - service: Ignition servicePublishingStrategy: type: Route - service: OVNSbDb servicePublishingStrategy: type: Route sshKey: name: <ssh_key_name> status: controlPlaneEndpoint: host: \"\" port: 0 --- apiVersion: hypershift.openshift.io/v1beta1 kind: NodePool metadata: creationTimestamp: null name: <node_pool_name> 10 namespace: clusters spec: arch: amd64 clusterName: <hosted_cluster_name> management: autoRepair: true upgradeType: Replace nodeDrainTimeout: 0s platform: aws: instanceProfile: <instance_profile_name> 11 instanceType: m6i.xlarge rootVolume: size: 120 type: gp3 subnet: id: <subnet_xxx> type: AWS release: image: quay.io/openshift-release-dev/ocp-release:4.16-x86_64 replicas: 2 status: replicas: 0",
"hcp create cluster aws --infra-id <infra_id> \\ 1 --name <hosted_cluster_name> \\ 2 --sts-creds <path_to_sts_credential_file> \\ 3 --pull-secret <path_to_pull_secret> \\ 4 --generate-ssh \\ 5 --node-pool-replicas 3 --role-arn <role_name> 6",
"delete managedcluster <hosted_cluster_name>",
"hcp destroy cluster aws --name <hosted_cluster_name> \\ 1 --infra-id <infra_id> \\ 2 --role-arn <arn_role> \\ 3 --sts-creds <path_to_sts_credential_file> \\ 4 --base-domain <basedomain> 5",
"aws sts get-session-token --output json > sts-creds.json",
"get managedclusters local-cluster",
"api.example.krnl.es. IN A 192.168.122.20 api.example.krnl.es. IN A 192.168.122.21 api.example.krnl.es. IN A 192.168.122.22 api-int.example.krnl.es. IN A 192.168.122.20 api-int.example.krnl.es. IN A 192.168.122.21 api-int.example.krnl.es. IN A 192.168.122.22 `*`.apps.example.krnl.es. IN A 192.168.122.23",
"api.example.krnl.es. IN A 2620:52:0:1306::5 api.example.krnl.es. IN A 2620:52:0:1306::6 api.example.krnl.es. IN A 2620:52:0:1306::7 api-int.example.krnl.es. IN A 2620:52:0:1306::5 api-int.example.krnl.es. IN A 2620:52:0:1306::6 api-int.example.krnl.es. IN A 2620:52:0:1306::7 `*`.apps.example.krnl.es. IN A 2620:52:0:1306::10",
"host-record=api-int.hub-dual.dns.base.domain.name,192.168.126.10 host-record=api.hub-dual.dns.base.domain.name,192.168.126.10 address=/apps.hub-dual.dns.base.domain.name/192.168.126.11 dhcp-host=aa:aa:aa:aa:10:01,ocp-master-0,192.168.126.20 dhcp-host=aa:aa:aa:aa:10:02,ocp-master-1,192.168.126.21 dhcp-host=aa:aa:aa:aa:10:03,ocp-master-2,192.168.126.22 dhcp-host=aa:aa:aa:aa:10:06,ocp-installer,192.168.126.25 dhcp-host=aa:aa:aa:aa:10:07,ocp-bootstrap,192.168.126.26 host-record=api-int.hub-dual.dns.base.domain.name,2620:52:0:1306::2 host-record=api.hub-dual.dns.base.domain.name,2620:52:0:1306::2 address=/apps.hub-dual.dns.base.domain.name/2620:52:0:1306::3 dhcp-host=aa:aa:aa:aa:10:01,ocp-master-0,[2620:52:0:1306::5] dhcp-host=aa:aa:aa:aa:10:02,ocp-master-1,[2620:52:0:1306::6] dhcp-host=aa:aa:aa:aa:10:03,ocp-master-2,[2620:52:0:1306::7] dhcp-host=aa:aa:aa:aa:10:06,ocp-installer,[2620:52:0:1306::8] dhcp-host=aa:aa:aa:aa:10:07,ocp-bootstrap,[2620:52:0:1306::9]",
"create ns <hosted_cluster_namespace>-<hosted_cluster_name>",
"hcp create cluster agent --name=<hosted_cluster_name> \\ 1 --pull-secret=<path_to_pull_secret> \\ 2 --agent-namespace=<hosted_control_plane_namespace> \\ 3 --base-domain=<basedomain> \\ 4 --api-server-address=api.<hosted_cluster_name>.<basedomain> \\ 5 --etcd-storage-class=<etcd_storage_class> \\ 6 --ssh-key <path_to_ssh_public_key> \\ 7 --namespace <hosted_cluster_namespace> \\ 8 --control-plane-availability-policy SingleReplica --release-image=quay.io/openshift-release-dev/ocp-release:<ocp_release_image> 9",
"-n <hosted_control_plane_namespace> get pods",
"NAME READY STATUS RESTARTS AGE capi-provider-7dcf5fc4c4-nr9sq 1/1 Running 0 4m32s catalog-operator-6cd867cc7-phb2q 2/2 Running 0 2m50s certified-operators-catalog-884c756c4-zdt64 1/1 Running 0 2m51s cluster-api-f75d86f8c-56wfz 1/1 Running 0 4m32s",
"- mirrors: - brew.registry.redhat.io source: registry.redhat.io - mirrors: - brew.registry.redhat.io source: registry.stage.redhat.io - mirrors: - brew.registry.redhat.io source: registry-proxy.engineering.redhat.com",
"hcp create cluster agent --name=<hosted_cluster_name> \\ 1 --pull-secret=<path_to_pull_secret> \\ 2 --agent-namespace=<hosted_control_plane_namespace> \\ 3 --base-domain=<basedomain> \\ 4 --api-server-address=api.<hosted_cluster_name>.<basedomain> \\ 5 --image-content-sources icsp.yaml \\ 6 --ssh-key <path_to_ssh_key> \\ 7 --namespace <hosted_cluster_namespace> \\ 8 --release-image=quay.io/openshift-release-dev/ocp-release:<ocp_release_image> 9",
"extract -n <hosted-control-plane-namespace> secret/admin-kubeconfig --to=- > kubeconfig-<hosted-cluster-name>",
"get co --kubeconfig=kubeconfig-<hosted-cluster-name>",
"NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE MESSAGE console 4.10.26 True False False 2m38s dns 4.10.26 True False False 2m52s image-registry 4.10.26 True False False 2m8s ingress 4.10.26 True False False 22m",
"get pods -A --kubeconfig=kubeconfig-<hosted-cluster-name>",
"NAMESPACE NAME READY STATUS RESTARTS AGE kube-system konnectivity-agent-khlqv 0/1 Running 0 3m52s openshift-cluster-node-tuning-operator tuned-dhw5p 1/1 Running 0 109s openshift-cluster-storage-operator cluster-storage-operator-5f784969f5-vwzgz 1/1 Running 1 (113s ago) 20m openshift-cluster-storage-operator csi-snapshot-controller-6b7687b7d9-7nrfw 1/1 Running 0 3m8s openshift-console console-5cbf6c7969-6gk6z 1/1 Running 0 119s openshift-console downloads-7bcd756565-6wj5j 1/1 Running 0 4m3s openshift-dns-operator dns-operator-77d755cd8c-xjfbn 2/2 Running 0 21m openshift-dns dns-default-kfqnh 2/2 Running 0 113s",
"-n <hosted-cluster-namespace> scale nodepool <nodepool-name> --replicas 2",
"-n <hosted-control-plane-namespace> get agent",
"NAME CLUSTER APPROVED ROLE STAGE 4dac1ab2-7dd5-4894-a220-6a3473b67ee6 hypercluster1 true auto-assign d9198891-39f4-4930-a679-65fb142b108b true auto-assign da503cf1-a347-44f2-875c-4960ddb04091 hypercluster1 true auto-assign",
"-n <hosted-control-plane-namespace> get agent -o jsonpath='{range .items[*]}BMH: {@.metadata.labels.agent-install\\.openshift\\.io/bmh} Agent: {@.metadata.name} State: {@.status.debugInfo.state}{\"\\n\"}{end}'",
"BMH: ocp-worker-2 Agent: 4dac1ab2-7dd5-4894-a220-6a3473b67ee6 State: binding BMH: ocp-worker-0 Agent: d9198891-39f4-4930-a679-65fb142b108b State: known-unbound BMH: ocp-worker-1 Agent: da503cf1-a347-44f2-875c-4960ddb04091 State: insufficient",
"extract -n <hosted-cluster-namespace> secret/<hosted-cluster-name>-admin-kubeconfig --to=- > kubeconfig-<hosted-cluster-name>",
"--kubeconfig kubeconfig-<hosted-cluster-name> get nodes",
"NAME STATUS ROLES AGE VERSION ocp-worker-1 Ready worker 5m41s v1.24.0+3882f8f ocp-worker-2 Ready worker 6m3s v1.24.0+3882f8f",
"-n <hosted-control-plane-namespace> get machines",
"NAME CLUSTER NODENAME PROVIDERID PHASE AGE VERSION hypercluster1-c96b6f675-m5vch hypercluster1-b2qhl ocp-worker-1 agent://da503cf1-a347-44f2-875c-4960ddb04091 Running 15m 4.x.z hypercluster1-c96b6f675-tl42p hypercluster1-b2qhl ocp-worker-2 agent://4dac1ab2-7dd5-4894-a220-6a3473b67ee6 Running 15m 4.x.z",
"--kubeconfig kubeconfig-<hosted-cluster-name> get clusterversion,co",
"NAME VERSION AVAILABLE PROGRESSING SINCE STATUS clusterversion.config.openshift.io/version False True 40m Unable to apply 4.x.z: the cluster operator console has not yet successfully rolled out NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE MESSAGE clusteroperator.config.openshift.io/console 4.12z False False False 11m RouteHealthAvailable: failed to GET route (https://console-openshift-console.apps.hypercluster1.domain.com): Get \"https://console-openshift-console.apps.hypercluster1.domain.com\": dial tcp 10.19.3.29:443: connect: connection refused clusteroperator.config.openshift.io/csi-snapshot-controller 4.12z True False False 10m clusteroperator.config.openshift.io/dns 4.12z True False False 9m16s",
"export NODEPOOL_NAME=USD{CLUSTER_NAME}-extra-cpu export WORKER_COUNT=\"2\" hcp create nodepool agent --cluster-name USDCLUSTER_NAME --name USDNODEPOOL_NAME --node-count USDWORKER_COUNT --agentLabelSelector '{\"matchLabels\": {\"size\": \"medium\"}}' 1",
"get nodepools --namespace clusters",
"extract -n <hosted-control-plane-namespace> secret/admin-kubeconfig --to=./hostedcluster-secrets --confirm",
"hostedcluster-secrets/kubeconfig",
"--kubeconfig ./hostedcluster-secrets get nodes",
"get nodepools --namespace clusters",
"apiVersion: v1 kind: Namespace metadata: name: metallb labels: openshift.io/cluster-monitoring: \"true\" annotations: workload.openshift.io/allowed: management --- apiVersion: operators.coreos.com/v1 kind: OperatorGroup metadata: name: metallb-operator-operatorgroup namespace: metallb --- apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: name: metallb-operator namespace: metallb spec: channel: \"stable\" name: metallb-operator source: redhat-operators sourceNamespace: openshift-marketplace",
"apply -f metallb-operator-config.yaml",
"apiVersion: metallb.io/v1beta1 kind: MetalLB metadata: name: metallb namespace: metallb",
"apply -f metallb-instance-config.yaml",
"apiVersion: metallb.io/v1beta1 kind: IPAddressPool metadata: name: <ip_address_pool_name> 1 namespace: metallb spec: protocol: layer2 autoAssign: false addresses: - <ingress_ip>-<ingress_ip> 2 --- apiVersion: metallb.io/v1beta1 kind: BGPAdvertisement metadata: name: <bgp_advertisement_name> 3 namespace: metallb spec: ipAddressPools: - <ip_address_pool_name> 4",
"apply -f ipaddresspool-bgpadvertisement-config.yaml",
"kind: Service apiVersion: v1 metadata: annotations: metallb.universe.tf/address-pool: ingress-public-ip name: metallb-ingress namespace: openshift-ingress spec: ports: - name: http protocol: TCP port: 80 targetPort: 80 - name: https protocol: TCP port: 443 targetPort: 443 selector: ingresscontroller.operator.openshift.io/deployment-ingresscontroller: default type: LoadBalancer",
"apply -f metallb-loadbalancer-service.yaml",
"curl -kI https://console-openshift-console.apps.example.krnl.es HTTP/1.1 200 OK",
"--kubeconfig <hosted_cluster_name>.kubeconfig get clusterversion,co",
"NAME VERSION AVAILABLE PROGRESSING SINCE STATUS clusterversion.config.openshift.io/version 4.x.y True False 3m32s Cluster version is 4.x.y NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE MESSAGE clusteroperator.config.openshift.io/console 4.x.y True False False 3m50s clusteroperator.config.openshift.io/ingress 4.x.y True False False 53m",
"-n <hosted-cluster-namespace> patch nodepool <hosted-cluster-name> --type=json -p '[{\"op\": \"remove\", \"path\": \"/spec/replicas\"},{\"op\":\"add\", \"path\": \"/spec/autoScaling\", \"value\": { \"max\": 5, \"min\": 2 }}]'",
"apiVersion: apps/v1 kind: Deployment metadata: creationTimestamp: null labels: app: reversewords name: reversewords namespace: default spec: replicas: 40 selector: matchLabels: app: reversewords strategy: {} template: metadata: creationTimestamp: null labels: app: reversewords spec: containers: - image: quay.io/mavazque/reversewords:latest name: reversewords resources: requests: memory: 2Gi status: {}",
"apply -f workload-config.yaml",
"extract -n <hosted-cluster-namespace> secret/<hosted-cluster-name>-admin-kubeconfig --to=./hostedcluster-secrets --confirm",
"hostedcluster-secrets/kubeconfig",
"--kubeconfig ./hostedcluster-secrets get nodes",
"--kubeconfig ./hostedcluster-secrets -n default delete deployment reversewords",
"--kubeconfig ./hostedcluster-secrets get nodes",
"-n <hosted-cluster-namespace> patch nodepool <hosted-cluster-name> --type=json -p '[\\{\"op\":\"remove\", \"path\": \"/spec/autoScaling\"}, \\{\"op\": \"add\", \"path\": \"/spec/replicas\", \"value\": <specify-value-to-scale-replicas>]'",
"get nodepool -n <hosted_cluster_namespace> <nodepool_name> -o yaml | grep nodeDrainTimeout",
"nodeDrainTimeout: 30s",
"patch nodepool -n <hosted_cluster_namespace> <nodepool_name> -p '{\"spec\":{\"nodeDrainTimeout\": \"30m\"}}' --type=merge",
"patch nodepool -n <hosted_cluster_namespace> <nodepool_name> -p '{\"spec\": {\"management\": {\"autoRepair\":true}}}' --type=merge",
"get nodepool -n <hosted_cluster_namespace> <nodepool_name> -o yaml | grep autoRepair",
"patch nodepool -n <hosted_cluster_namespace> <nodepool_name> -p '{\"spec\": {\"management\": {\"autoRepair\":false}}}' --type=merge",
"get nodepool -n <hosted_cluster_namespace> <nodepool_name> -o yaml | grep autoRepair",
"hcp destroy cluster agent --name <cluster_name>",
"get managedclusters local-cluster",
"api-int.example.krnl.es. IN A 192.168.122.22 `*`.apps.example.krnl.es. IN A 192.168.122.23",
"api-int.example.krnl.es. IN A 2620:52:0:1306::7 `*`.apps.example.krnl.es. IN A 2620:52:0:1306::10",
"host-record=api-int.hub-dual.dns.base.domain.name,2620:52:0:1306::2 address=/apps.hub-dual.dns.base.domain.name/2620:52:0:1306::3 dhcp-host=aa:aa:aa:aa:10:01,ocp-master-0,[2620:52:0:1306::5]",
"create ns <hosted-cluster-namespace>-<hosted-cluster-name>",
"hcp create cluster agent --name=<hosted-cluster-name> \\ 1 --pull-secret=<path-to-pull-secret> \\ 2 --agent-namespace=<hosted-control-plane-namespace> \\ 3 --base-domain=<basedomain> \\ 4 --api-server-address=api.<hosted-cluster-name>.<basedomain> \\ 5 --etcd-storage-class=<etcd-storage-class> \\ 6 --ssh-key <path-to-ssh-key> \\ 7 --namespace <hosted-cluster-namespace> \\ 8 --control-plane-availability-policy SingleReplica --release-image=quay.io/openshift-release-dev/ocp-release:<ocp-release> 9",
"-n <hosted-control-plane-namespace> get pods",
"NAME READY STATUS RESTARTS AGE catalog-operator-6cd867cc7-phb2q 2/2 Running 0 2m50s control-plane-operator-f6b4c8465-4k5dh 1/1 Running 0 4m32s",
"extract -n <hosted-cluster-namespace> secret/<hosted-cluster-name>-admin-kubeconfig --to=- > kubeconfig-<hosted-cluster-name>",
"get co --kubeconfig=kubeconfig-<hosted_cluster_name>",
"NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE MESSAGE console 4.10.26 True False False 2m38s csi-snapshot-controller 4.10.26 True False False 4m3s dns 4.10.26 True False False 2m52s",
"get pods -A --kubeconfig=kubeconfig-<hosted-cluster-name>",
"NAMESPACE NAME READY STATUS RESTARTS AGE kube-system konnectivity-agent-khlqv 0/1 Running 0 3m52s openshift-cluster-samples-operator cluster-samples-operator-6b5bcb9dff-kpnbc 2/2 Running 0 20m openshift-monitoring alertmanager-main-0 6/6 Running 0 100s openshift-monitoring openshift-state-metrics-677b9fb74f-qqp6g 3/3 Running 0 104s",
"-n <hosted-cluster-namespace> scale nodepool <nodepool-name> --replicas 2",
"-n <hosted-control-plane-namespace> get agent",
"NAME CLUSTER APPROVED ROLE STAGE 4dac1ab2-7dd5-4894-a220-6a3473b67ee6 hypercluster1 true auto-assign",
"-n <hosted-control-plane-namespace> get agent -o jsonpath='{range .items[*]}BMH: {@.metadata.labels.agent-install\\.openshift\\.io/bmh} Agent: {@.metadata.name} State: {@.status.debugInfo.state}{\"\\n\"}{end}'",
"BMH: ocp-worker-2 Agent: 4dac1ab2-7dd5-4894-a220-6a3473b67ee6 State: binding BMH: ocp-worker-1 Agent: da503cf1-a347-44f2-875c-4960ddb04091 State: insufficient",
"extract -n <hosted-cluster-namespace> secret/<hosted-cluster-name>-admin-kubeconfig --to=- > kubeconfig-<hosted-cluster-name>",
"--kubeconfig kubeconfig-<hosted-cluster-name> get nodes",
"NAME STATUS ROLES AGE VERSION ocp-worker-1 Ready worker 5m41s v1.24.0+3882f8f",
"-n <hosted-control-plane-namespace> get machines",
"NAME CLUSTER NODENAME PROVIDERID PHASE AGE VERSION hypercluster1-c96b6f675-m5vch hypercluster1-b2qhl ocp-worker-1 agent://da503cf1-a347-44f2-875c-4960ddb04091 Running 15m 4.x.z",
"--kubeconfig kubeconfig-<hosted-cluster-name> get clusterversion,co",
"NAME VERSION AVAILABLE PROGRESSING SINCE STATUS clusterversion.config.openshift.io/version False True 40m Unable to apply 4.x.z: the cluster operator console has not yet successfully rolled out NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE MESSAGE clusteroperator.config.openshift.io/console 4.x.z False False False 11m RouteHealthAvailable: failed to GET route (https://console-openshift-console.apps.hypercluster1.domain.com): Get \"https://console-openshift-console.apps.hypercluster1.domain.com\": dial tcp 10.19.3.29:443: connect: connection refused clusteroperator.config.openshift.io/csi-snapshot-controller 4.x.z True False False 10m clusteroperator.config.openshift.io/dns 4.x.z True False False 9m16s",
"export NODEPOOL_NAME=USD{CLUSTER_NAME}-extra-cpu export WORKER_COUNT=\"2\" hcp create nodepool agent --cluster-name USDCLUSTER_NAME --name USDNODEPOOL_NAME --node-count USDWORKER_COUNT --agentLabelSelector '{\"matchLabels\": {\"size\": \"medium\"}}' 1",
"get nodepools --namespace clusters",
"extract -n <hosted-cluster-namespace> secret/<hosted-cluster-name>-admin-kubeconfig --to=./hostedcluster-secrets --confirm",
"hostedcluster-secrets/kubeconfig",
"--kubeconfig ./hostedcluster-secrets get nodes",
"get nodepools --namespace clusters",
"apiVersion: v1 kind: Namespace metadata: name: metallb labels: openshift.io/cluster-monitoring: \"true\" annotations: workload.openshift.io/allowed: management --- apiVersion: operators.coreos.com/v1 kind: OperatorGroup metadata: name: metallb-operator-operatorgroup namespace: metallb --- apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: name: metallb-operator namespace: metallb spec: channel: \"stable\" name: metallb-operator source: redhat-operators sourceNamespace: openshift-marketplace",
"apply -f metallb-operator-config.yaml",
"apiVersion: metallb.io/v1beta1 kind: MetalLB metadata: name: metallb namespace: metallb",
"apply -f metallb-instance-config.yaml",
"apiVersion: metallb.io/v1beta1 kind: IPAddressPool metadata: name: <ip_address_pool_name> 1 namespace: metallb spec: protocol: layer2 autoAssign: false addresses: - <ingress_ip>-<ingress_ip> 2 --- apiVersion: metallb.io/v1beta1 kind: BGPAdvertisement metadata: name: <bgp_advertisement_name> 3 namespace: metallb spec: ipAddressPools: - <ip_address_pool_name> 4",
"apply -f ipaddresspool-bgpadvertisement-config.yaml",
"kind: Service apiVersion: v1 metadata: annotations: metallb.universe.tf/address-pool: ingress-public-ip name: metallb-ingress namespace: openshift-ingress spec: ports: - name: http protocol: TCP port: 80 targetPort: 80 - name: https protocol: TCP port: 443 targetPort: 443 selector: ingresscontroller.operator.openshift.io/deployment-ingresscontroller: default type: LoadBalancer",
"apply -f metallb-loadbalancer-service.yaml",
"curl -kI https://console-openshift-console.apps.example.krnl.es HTTP/1.1 200 OK",
"--kubeconfig <hosted_cluster_name>.kubeconfig get clusterversion,co",
"NAME VERSION AVAILABLE PROGRESSING SINCE STATUS clusterversion.config.openshift.io/version 4.x.y True False 3m32s Cluster version is 4.x.y NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE MESSAGE clusteroperator.config.openshift.io/console 4.x.y True False False 3m50s clusteroperator.config.openshift.io/ingress 4.x.y True False False 53m",
"-n <hosted-cluster-namespace> patch nodepool <hosted-cluster-name> --type=json -p '[{\"op\": \"remove\", \"path\": \"/spec/replicas\"},{\"op\":\"add\", \"path\": \"/spec/autoScaling\", \"value\": { \"max\": 5, \"min\": 2 }}]'",
"apiVersion: apps/v1 kind: Deployment metadata: creationTimestamp: null labels: app: reversewords name: reversewords namespace: default spec: replicas: 40 selector: matchLabels: app: reversewords strategy: {} template: metadata: creationTimestamp: null labels: app: reversewords spec: containers: - image: quay.io/mavazque/reversewords:latest name: reversewords resources: requests: memory: 2Gi status: {}",
"apply -f workload-config.yaml",
"extract -n <hosted-cluster-namespace> secret/<hosted-cluster-name>admin-kubeconfig --to=./hostedcluster-secrets --confirm",
"hostedcluster-secrets/kubeconfig",
"--kubeconfig <hosted-cluster-name>.kubeconfig get nodes",
"--kubeconfig <hosted-cluster-name>.kubeconfig -n default delete deployment reversewords",
"--kubeconfig <hosted-cluster-name>.kubeconfig get nodes",
"-n <hosted-cluster-namespace> patch nodepool <hosted-cluster-name> --type=json -p '[\\{\"op\":\"remove\", \"path\": \"/spec/autoScaling\"}, \\{\"op\": \"add\", \"path\": \"/spec/replicas\", \"value\": <specify-value-to-scale-replicas>]'",
"get nodepool -n <hosted_cluster_namespace> <nodepool_name> -o yaml | grep nodeDrainTimeout",
"nodeDrainTimeout: 30s",
"patch nodepool -n <hosted_cluster_namespace> <nodepool_name> -p '{\"spec\":{\"nodeDrainTimeout\": \"30m\"}}' --type=merge",
"patch nodepool -n <hosted_cluster_namespace> <nodepool_name> -p '{\"spec\": {\"management\": {\"autoRepair\":true}}}' --type=merge",
"get nodepool -n <hosted_cluster_namespace> <nodepool_name> -o yaml | grep autoRepair",
"patch nodepool -n <hosted_cluster_namespace> <nodepool_name> -p '{\"spec\": {\"management\": {\"autoRepair\":false}}}' --type=merge",
"get nodepool -n <hosted_cluster_namespace> <nodepool_name> -o yaml | grep autoRepair",
"hcp destroy cluster agent --name <hosted_cluster_name>",
"get managedclusters local-cluster",
"-n <hosted-control-plane-namespace> get agents",
"NAME CLUSTER APPROVED ROLE STAGE 86f7ac75-4fc4-4b36-8130-40fa12602218 auto-assign e57a637f-745b-496e-971d-1abbf03341ba auto-assign",
"-n <hosted-control-plane-namespace> patch agent 86f7ac75-4fc4-4b36-8130-40fa12602218 -p '{\"spec\":{\"installation_disk_id\":\"/dev/sda\",\"approved\":true,\"hostname\":\"worker-0.example.krnl.es\"}}' --type merge -n <hosted-control-plane-namespace> patch agent 23d0c614-2caa-43f5-b7d3-0b3564688baa -p '{\"spec\":{\"installation_disk_id\":\"/dev/sda\",\"approved\":true,\"hostname\":\"worker-1.example.krnl.es\"}}' --type merge",
"-n <hosted-control-plane-namespace> get agents",
"NAME CLUSTER APPROVED ROLE STAGE 86f7ac75-4fc4-4b36-8130-40fa12602218 true auto-assign e57a637f-745b-496e-971d-1abbf03341ba true auto-assign",
"cat /var/named/<example.krnl.es.zone>",
"TTL 900 @ IN SOA bastion.example.krnl.es.com. hostmaster.example.krnl.es.com. ( 2019062002 1D 1H 1W 3H ) IN NS bastion.example.krnl.es.com. ; ; api IN A 1xx.2x.2xx.1xx 1 api-int IN A 1xx.2x.2xx.1xx ; ; *.apps.<hosted-cluster-name>.<basedomain> IN A 1xx.2x.2xx.1xx ; ;EOF",
"compute-0 IN A 1xx.2x.2xx.1yy compute-1 IN A 1xx.2x.2xx.1yy",
"apiVersion: agent-install.openshift.io/v1beta1 kind: InfraEnv metadata: name: <hosted-cluster-name> namespace: <hosted-control-plane-namespace> spec: cpuArchitecture: ppc64le pullSecretRef: name: pull-secret sshAuthorizedKey: <ssh-public-key>",
"apply -f infraenv-config.yaml",
"-n <hosted-control-plane-namespace> get InfraEnv <hosted-cluster-name> -o json",
"-n <clusters_namespace> scale nodepool <nodepool_name> --replicas 2",
"-n <hosted_control_plane_namespace> get agent -o jsonpath='{range .items[*]}BMH: {@.metadata.labels.agent-install\\.openshift\\.io/bmh} Agent: {@.metadata.name} State: {@.status.debugInfo.state}{\"\\n\"}{end}'",
"BMH: Agent: 50c23cda-cedc-9bbd-bcf1-9b3a5c75804d State: known-unbound BMH: Agent: 5e498cd3-542c-e54f-0c58-ed43e28b568a State: insufficient",
"-n <hosted_control_plane_namespace> get agent",
"NAME CLUSTER APPROVED ROLE STAGE 50c23cda-cedc-9bbd-bcf1-9b3a5c75804d hosted-forwarder true auto-assign 5e498cd3-542c-e54f-0c58-ed43e28b568a true auto-assign da503cf1-a347-44f2-875c-4960ddb04091 hosted-forwarder true auto-assign",
"hcp create kubeconfig --namespace <clusters_namespace> --name <hosted_cluster_namespace> > <hosted_cluster_name>.kubeconfig",
"--kubeconfig <hosted_cluster_name>.kubeconfig get nodes",
"NAME STATUS ROLES AGE VERSION worker-zvm-0.hostedn.example.com Ready worker 5m41s v1.24.0+3882f8f worker-zvm-1.hostedn.example.com Ready worker 6m3s v1.24.0+3882f8f",
"-n <hosted_control_plane_namespace> get machine.cluster.x-k8s.io",
"NAME CLUSTER NODENAME PROVIDERID PHASE AGE VERSION hosted-forwarder-79558597ff-5tbqp hosted-forwarder-crqq5 worker-zvm-0.hostedn.example.com agent://50c23cda-cedc-9bbd-bcf1-9b3a5c75804d Running 41h 4.15.0 hosted-forwarder-79558597ff-lfjfk hosted-forwarder-crqq5 worker-zvm-1.hostedn.example.com agent://5e498cd3-542c-e54f-0c58-ed43e28b568a Running 41h 4.15.0",
"--kubeconfig <hosted_cluster_name>.kubeconfig get clusterversion",
"NAME VERSION AVAILABLE PROGRESSING SINCE STATUS clusterversion.config.openshift.io/version 4.15.0 True False 40h Cluster version is 4.15.0",
"--kubeconfig <hosted_cluster_name>.kubeconfig get clusteroperators",
"get managedclusters local-cluster",
"virt-install --name \"<vm_name>\" \\ 1 --autostart --ram=16384 --cpu host --vcpus=4 --location \"<path_to_kernel_initrd_image>,kernel=kernel.img,initrd=initrd.img\" \\ 2 --disk <qcow_image_path> \\ 3 --network network:macvtap-net,mac=<mac_address> \\ 4 --graphics none --noautoconsole --wait=-1 --extra-args \"rd.neednet=1 nameserver=<nameserver> coreos.live.rootfs_url=http://<http_server>/rootfs.img random.trust_cpu=on rd.luks.options=discard ignition.firstboot ignition.platform.id=metal console=tty1 console=ttyS1,115200n8 coreos.inst.persistent-kargs=console=tty1 console=ttyS1,115200n8\" 5",
"virt-install --name \"<vm_name>\" \\ 1 --autostart --memory=16384 --cpu host --vcpus=4 --network network:macvtap-net,mac=<mac_address> \\ 2 --cdrom \"<path_to_image.iso>\" \\ 3 --disk <qcow_image_path> --graphics none --noautoconsole --os-variant <os_version> \\ 4 --wait=-1",
"rd.neednet=1 cio_ignore=all,!condev console=ttysclp0 ignition.firstboot ignition.platform.id=metal coreos.live.rootfs_url=http://<http_server>/rhcos-<version>-live-rootfs.<architecture>.img \\ 1 coreos.inst.persistent-kargs=console=ttysclp0 ip=<ip>::<gateway>:<netmask>:<hostname>::none nameserver=<dns> \\ 2 rd.znet=qeth,<network_adaptor_range>,layer2=1 rd.<disk_type>=<adapter> \\ 3 zfcp.allow_lun_scan=0 ai.ip_cfg_override=1 \\ 4 random.trust_cpu=on rd.luks.options=discard",
"KERNEL_IMG_PATH='./kernel.img' INITRD_IMG_PATH='./initrd.img' CMDLINE_PATH='./generic.prm' kernel_size=USD(stat -c%s USDKERNEL_IMG_PATH ) initrd_size=USD(stat -c%s USDINITRD_IMG_PATH)",
"offset=USD(( (kernel_size + 1048575) / 1048576 * 1048576 ))",
"INITRD_IMG_NAME=USD(echo USDINITRD_IMG_PATH | rev | cut -d '/' -f 1 | rev) KERNEL_OFFSET=0x00000000 KERNEL_CMDLINE_OFFSET=0x00010480 INITRD_ADDR_SIZE_OFFSET=0x00010408 OFFSET_HEX=USD(printf '0x%08x\\n' USDoffset)",
"printf \"USD(printf '%016x\\n' USDinitrd_size)\" | xxd -r -p > temp_size.bin",
"cat temp_address.bin temp_size.bin > \"USDINITRD_IMG_NAME.addrsize\"",
"rm -rf temp_address.bin temp_size.bin",
"USDKERNEL_IMG_PATH USDKERNEL_OFFSET USDINITRD_IMG_PATH USDOFFSET_HEX USDINITRD_IMG_NAME.addrsize USDINITRD_ADDR_SIZE_OFFSET USDCMDLINE_PATH USDKERNEL_CMDLINE_OFFSET",
"rd.neednet=1 cio_ignore=all,!condev console=ttysclp0 ignition.firstboot ignition.platform.id=metal coreos.live.rootfs_url=http://<http_server>/rhcos-<version>-live-rootfs.<architecture>.img \\ 1 coreos.inst.persistent-kargs=console=ttysclp0 ip=<ip>::<gateway>:<netmask>:<hostname>::none nameserver=<dns> \\ 2 rd.znet=qeth,<network_adaptor_range>,layer2=1 rd.<disk_type>=<adapter> \\ 3 zfcp.allow_lun_scan=0 ai.ip_cfg_override=1 \\ 4",
"vmur pun -r -u -N kernel.img USDINSTALLERKERNELLOCATION/<image name>",
"vmur pun -r -u -N generic.parm USDPARMFILELOCATION/paramfilename",
"vmur pun -r -u -N initrd.img USDINSTALLERINITRAMFSLOCATION/<image name>",
"cp ipl c",
"-n <hosted_control_plane_namespace> get agents",
"NAME CLUSTER APPROVED ROLE STAGE 50c23cda-cedc-9bbd-bcf1-9b3a5c75804d auto-assign 5e498cd3-542c-e54f-0c58-ed43e28b568a auto-assign",
"-n <hosted_control_plane_namespace> patch agent 50c23cda-cedc-9bbd-bcf1-9b3a5c75804d -p '{\"spec\":{\"installation_disk_id\":\"/dev/sda\",\"approved\":true,\"hostname\":\"worker-zvm-0.hostedn.example.com\"}}' --type merge",
"-n <hosted_control_plane_namespace> get agents",
"NAME CLUSTER APPROVED ROLE STAGE 50c23cda-cedc-9bbd-bcf1-9b3a5c75804d true auto-assign 5e498cd3-542c-e54f-0c58-ed43e28b568a true auto-assign",
"cat /var/named/<example.krnl.es.zone>",
"TTL 900 @ IN SOA bastion.example.krnl.es.com. hostmaster.example.krnl.es.com. ( 2019062002 1D 1H 1W 3H ) IN NS bastion.example.krnl.es.com. ; ; api IN A 1xx.2x.2xx.1xx 1 api-int IN A 1xx.2x.2xx.1xx ; ; *.apps IN A 1xx.2x.2xx.1xx ; ;EOF",
"compute-0 IN A 1xx.2x.2xx.1yy compute-1 IN A 1xx.2x.2xx.1yy",
"apiVersion: agent-install.openshift.io/v1beta1 kind: InfraEnv metadata: name: <hosted-cluster-name> namespace: <hosted-control-plane-namespace> spec: cpuArchitecture: s390x pullSecretRef: name: pull-secret sshAuthorizedKey: <ssh-public-key>",
"apply -f infraenv-config.yaml",
"-n <hosted-control-plane-namespace> get InfraEnv <hosted-cluster-name> -o json",
"-n <clusters_namespace> scale nodepool <nodepool_name> --replicas 2",
"-n <hosted_control_plane_namespace> get agent -o jsonpath='{range .items[*]}BMH: {@.metadata.labels.agent-install\\.openshift\\.io/bmh} Agent: {@.metadata.name} State: {@.status.debugInfo.state}{\"\\n\"}{end}'",
"BMH: Agent: 50c23cda-cedc-9bbd-bcf1-9b3a5c75804d State: known-unbound BMH: Agent: 5e498cd3-542c-e54f-0c58-ed43e28b568a State: insufficient",
"-n <hosted_control_plane_namespace> get agent",
"NAME CLUSTER APPROVED ROLE STAGE 50c23cda-cedc-9bbd-bcf1-9b3a5c75804d hosted-forwarder true auto-assign 5e498cd3-542c-e54f-0c58-ed43e28b568a true auto-assign da503cf1-a347-44f2-875c-4960ddb04091 hosted-forwarder true auto-assign",
"hcp create kubeconfig --namespace <clusters_namespace> --name <hosted_cluster_namespace> > <hosted_cluster_name>.kubeconfig",
"--kubeconfig <hosted_cluster_name>.kubeconfig get nodes",
"NAME STATUS ROLES AGE VERSION worker-zvm-0.hostedn.example.com Ready worker 5m41s v1.24.0+3882f8f worker-zvm-1.hostedn.example.com Ready worker 6m3s v1.24.0+3882f8f",
"-n <hosted_control_plane_namespace> get machine.cluster.x-k8s.io",
"NAME CLUSTER NODENAME PROVIDERID PHASE AGE VERSION hosted-forwarder-79558597ff-5tbqp hosted-forwarder-crqq5 worker-zvm-0.hostedn.example.com agent://50c23cda-cedc-9bbd-bcf1-9b3a5c75804d Running 41h 4.15.0 hosted-forwarder-79558597ff-lfjfk hosted-forwarder-crqq5 worker-zvm-1.hostedn.example.com agent://5e498cd3-542c-e54f-0c58-ed43e28b568a Running 41h 4.15.0",
"--kubeconfig <hosted_cluster_name>.kubeconfig get clusterversion,co",
"NAME VERSION AVAILABLE PROGRESSING SINCE STATUS clusterversion.config.openshift.io/version 4.15.0-ec.2 True False 40h Cluster version is 4.15.0-ec.2",
"--kubeconfig <hosted_cluster_name>.kubeconfig get clusteroperators",
"-n <clusters_namespace> scale nodepool <nodepool_name> --replicas 0",
"--kubeconfig <hosted_clusted_name>.kubeconfig delete node <comopute_node_name>",
"--kubeconfig <hosted_cluster_name>.kubeconfig get nodes",
"-n <hosted_control_plane_namespace> delete agent <agent_name>",
"hcp destroy cluster agent --name <hosted_cluster_name> --namespace <clusters_namepsace>",
"patch ingresscontroller -n openshift-ingress-operator default --type=json -p '[{ \"op\": \"add\", \"path\": \"/spec/routeAdmission\", \"value\": {wildcardPolicy: \"WildcardsAllowed\"}}]'",
"patch storageclass ocs-storagecluster-ceph-rbd -p '{\"metadata\": {\"annotations\":{\"storageclass.kubernetes.io/is-default-class\":\"true\"}}}'",
"get managedclusters local-cluster",
"hcp create cluster kubevirt --name <hosted-cluster-name> \\ 1 --node-pool-replicas <worker-count> \\ 2 --pull-secret <path-to-pull-secret> \\ 3 --memory <value-for-memory> \\ 4 --cores <value-for-cpu> \\ 5 --etcd-storage-class=<etcd-storage-class> 6",
"-n clusters-<hosted-cluster-name> get pods",
"NAME READY STATUS RESTARTS AGE capi-provider-5cc7b74f47-n5gkr 1/1 Running 0 3m catalog-operator-5f799567b7-fd6jw 2/2 Running 0 69s certified-operators-catalog-784b9899f9-mrp6p 1/1 Running 0 66s cluster-api-6bbc867966-l4dwl 1/1 Running 0 66s . . . redhat-operators-catalog-9d5fd4d44-z8qqk 1/1 Running 0 66s",
"get --namespace clusters hostedclusters",
"NAMESPACE NAME VERSION KUBECONFIG PROGRESS AVAILABLE PROGRESSING MESSAGE clusters example 4.x.0 example-admin-kubeconfig Completed True False The hosted control plane is available",
"hcp create cluster kubevirt --name <hosted-cluster-name> \\ 1 --node-pool-replicas <worker-count> \\ 2 --pull-secret <path-to-pull-secret> \\ 3 --memory <value-for-memory> \\ 4 --cores <value-for-cpu> \\ 5 --infra-namespace=<hosted-cluster-namespace>-<hosted-cluster-name> \\ 6 --infra-kubeconfig-file=<path-to-external-infra-kubeconfig> 7",
"*.apps.mgmt-cluster.example.com",
"*.apps.guest.apps.mgmt-cluster.example.com",
"patch ingresscontroller -n openshift-ingress-operator default --type=json -p '[{ \"op\": \"add\", \"path\": \"/spec/routeAdmission\", \"value\": {wildcardPolicy: \"WildcardsAllowed\"}}]'",
"hcp create cluster kubevirt --name <hosted-cluster-name> \\ 1 --node-pool-replicas <worker-count> \\ 2 --pull-secret <path-to-pull-secret> \\ 3 --memory <value-for-memory> \\ 4 --cores <value-for-cpu> \\ 5 --base-domain <basedomain> 6",
"get --namespace clusters hostedclusters",
"NAME VERSION KUBECONFIG PROGRESS AVAILABLE PROGRESSING MESSAGE example example-admin-kubeconfig Partial True False The hosted control plane is available",
"hcp create kubeconfig --name <hosted-cluster-name> > <hosted-cluster-name>-kubeconfig",
"--kubeconfig <hosted-cluster-name>-kubeconfig get co",
"NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE MESSAGE console 4.x.0 False False False 30m RouteHealthAvailable: failed to GET route (https://console-openshift-console.apps.example.hypershift.lab): Get \"https://console-openshift-console.apps.example.hypershift.lab\": dial tcp: lookup console-openshift-console.apps.example.hypershift.lab on 172.31.0.10:53: no such host ingress 4.x.0 True False True 28m The \"default\" ingress controller reports Degraded=True: DegradedConditions: One or more other status conditions indicate a degraded state: CanaryChecksSucceeding=False (CanaryChecksRepetitiveFailures: Canary route checks for the default ingress controller are failing)",
"--kubeconfig <hosted-cluster-name>-kubeconfig get services -n openshift-ingress router-nodeport-default -o jsonpath='{.spec.ports[?(@.name==\"http\")].nodePort}'",
"--kubeconfig <hosted-cluster-name>-kubeconfig get services -n openshift-ingress router-nodeport-default -o jsonpath='{.spec.ports[?(@.name==\"https\")].nodePort}'",
"apply -f - apiVersion: v1 kind: Service metadata: labels: app: <hosted-cluster-name> name: <hosted-cluster-name>-apps namespace: clusters-<hosted-cluster-name> spec: ports: - name: https-443 port: 443 protocol: TCP targetPort: <https-node-port> 1 - name: http-80 port: 80 protocol: TCP targetPort: <http-node-port> 2 selector: kubevirt.io: virt-launcher type: LoadBalancer",
"-n clusters-<hosted-cluster-name> get service <hosted-cluster-name>-apps -o jsonpath='{.status.loadBalancer.ingress[0].ip}'",
"192.168.20.30",
"*.apps.<hosted-cluster-name\\>.<base-domain\\>.",
"dig +short test.apps.example.hypershift.lab 192.168.20.30",
"get --namespace clusters hostedclusters",
"NAME VERSION KUBECONFIG PROGRESS AVAILABLE PROGRESSING MESSAGE example 4.x.0 example-admin-kubeconfig Completed True False The hosted control plane is available",
"apiVersion: metallb.io/v1beta1 kind: MetalLB metadata: name: metallb namespace: metallb-system",
"apply -f configure-metallb.yaml",
"metallb.metallb.io/metallb created",
"apiVersion: metallb.io/v1beta1 kind: IPAddressPool metadata: name: metallb namespace: metallb-system spec: addresses: - 192.168.216.32-192.168.216.122 1",
"apply -f create-ip-address-pool.yaml",
"ipaddresspool.metallb.io/metallb created",
"apiVersion: metallb.io/v1beta1 kind: L2Advertisement metadata: name: l2advertisement namespace: metallb-system spec: ipAddressPools: - metallb",
"apply -f l2advertisement.yaml",
"l2advertisement.metallb.io/metallb created",
"hcp create cluster kubevirt --name <hosted_cluster_name> \\ 1 --node-pool-replicas <worker_node_count> \\ 2 --pull-secret <path_to_pull_secret> \\ 3 --memory <memory> \\ 4 --cores <cpu> \\ 5 --additional-network name:<namespace/name> \\ 6 --additional-network name:<namespace/name>",
"hcp create cluster kubevirt --name <hosted_cluster_name> \\ 1 --node-pool-replicas <worker_node_count> \\ 2 --pull-secret <path_to_pull_secret> \\ 3 --memory <memory> \\ 4 --cores <cpu> \\ 5 --attach-default-network false \\ 6 --additional-network name:<namespace>/<network_name> 7",
"hcp create cluster kubevirt --name <hosted_cluster_name> \\ 1 --node-pool-replicas <worker_node_count> \\ 2 --pull-secret <path_to_pull_secret> \\ 3 --memory <memory> \\ 4 --cores <cpu> \\ 5 --qos-class Guaranteed 6",
"hcp create cluster kubevirt --name <hosted_cluster_name> \\ 1 --node-pool-replicas <worker_node_count> \\ 2 --pull-secret <path_to_pull_secret> \\ 3 --memory <memory> \\ 4 --cores <cpu> \\ 5 --vm-node-selector <label_key>=<label_value>,<label_key>=<label_value> 6",
"NODEPOOL_NAME=USD{CLUSTER_NAME}-work NODEPOOL_REPLICAS=5 scale nodepool/USDNODEPOOL_NAME --namespace clusters --replicas=USDNODEPOOL_REPLICAS",
"--kubeconfig USDCLUSTER_NAME-kubeconfig get nodes",
"NAME STATUS ROLES AGE VERSION example-9jvnf Ready worker 97s v1.27.4+18eadca example-n6prw Ready worker 116m v1.27.4+18eadca example-nc6g4 Ready worker 117m v1.27.4+18eadca example-thp29 Ready worker 4m17s v1.27.4+18eadca example-twxns Ready worker 88s v1.27.4+18eadca",
"export NODEPOOL_NAME=USD{CLUSTER_NAME}-extra-cpu export WORKER_COUNT=\"2\" export MEM=\"6Gi\" export CPU=\"4\" export DISK=\"16\" hcp create nodepool kubevirt --cluster-name USDCLUSTER_NAME --name USDNODEPOOL_NAME --node-count USDWORKER_COUNT --memory USDMEM --cores USDCPU --root-volume-size USDDISK",
"get nodepools --namespace clusters",
"NAME CLUSTER DESIRED NODES CURRENT NODES AUTOSCALING AUTOREPAIR VERSION UPDATINGVERSION UPDATINGCONFIG MESSAGE example example 5 5 False False 4.x.0 example-extra-cpu example 2 False False True True Minimum availability requires 2 replicas, current 0 available",
"--kubeconfig USDCLUSTER_NAME-kubeconfig get nodes",
"NAME STATUS ROLES AGE VERSION example-9jvnf Ready worker 97s v1.27.4+18eadca example-n6prw Ready worker 116m v1.27.4+18eadca example-nc6g4 Ready worker 117m v1.27.4+18eadca example-thp29 Ready worker 4m17s v1.27.4+18eadca example-twxns Ready worker 88s v1.27.4+18eadca example-extra-cpu-zh9l5 Ready worker 2m6s v1.27.4+18eadca example-extra-cpu-zr8mj Ready worker 102s v1.27.4+18eadca",
"get nodepools --namespace clusters",
"NAME CLUSTER DESIRED NODES CURRENT NODES AUTOSCALING AUTOREPAIR VERSION UPDATINGVERSION UPDATINGCONFIG MESSAGE example example 5 5 False False 4.x.0 example-extra-cpu example 2 2 False False 4.x.0",
"get --namespace clusters hostedclusters USD{CLUSTER_NAME}",
"NAMESPACE NAME VERSION KUBECONFIG PROGRESS AVAILABLE PROGRESSING MESSAGE clusters example 4.12.2 example-admin-kubeconfig Completed True False The hosted control plane is available",
"hcp create kubeconfig --name USDCLUSTER_NAME > USDCLUSTER_NAME-kubeconfig",
"get co --kubeconfig=USDCLUSTER_NAME-kubeconfig",
"NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE MESSAGE console 4.12.2 True False False 2m38s csi-snapshot-controller 4.12.2 True False False 4m3s dns 4.12.2 True False False 2m52s image-registry 4.12.2 True False False 2m8s ingress 4.12.2 True False False 22m kube-apiserver 4.12.2 True False False 23m kube-controller-manager 4.12.2 True False False 23m kube-scheduler 4.12.2 True False False 23m kube-storage-version-migrator 4.12.2 True False False 4m52s monitoring 4.12.2 True False False 69s network 4.12.2 True False False 4m3s node-tuning 4.12.2 True False False 2m22s openshift-apiserver 4.12.2 True False False 23m openshift-controller-manager 4.12.2 True False False 23m openshift-samples 4.12.2 True False False 2m15s operator-lifecycle-manager 4.12.2 True False False 22m operator-lifecycle-manager-catalog 4.12.2 True False False 23m operator-lifecycle-manager-packageserver 4.12.2 True False False 23m service-ca 4.12.2 True False False 4m41s storage 4.12.2 True False False 4m43s",
"hcp create cluster kubevirt --name <hosted_cluster_name> \\ 1 --node-pool-replicas <worker_node_count> \\ 2 --pull-secret <path_to_pull_secret> \\ 3 --memory <memory> \\ 4 --cores <cpu> \\ 5 --infra-storage-class-mapping=<infrastructure_storage_class>/<hosted_storage_class> \\ 6",
"hcp create cluster kubevirt --name <hosted_cluster_name> \\ 1 --node-pool-replicas <worker_node_count> \\ 2 --pull-secret <path_to_pull_secret> \\ 3 --memory <memory> \\ 4 --cores <cpu> \\ 5 --infra-storage-class-mapping=<infrastructure_storage_class>/<hosted_storage_class> \\ 6 --infra-volumesnapshot-class-mapping=<infrastructure_volume_snapshot_class>/<hosted_volume_snapshot_class> 7",
"hcp create cluster kubevirt --name <hosted_cluster_name> \\ 1 --node-pool-replicas <worker_node_count> \\ 2 --pull-secret <path_to_pull_secret> \\ 3 --memory <memory> \\ 4 --cores <cpu> \\ 5 --infra-storage-class-mapping=<infrastructure_storage_class>/<hosted_storage_class>,group=<group_name> \\ 6 --infra-storage-class-mapping=<infrastructure_storage_class>/<hosted_storage_class>,group=<group_name> --infra-storage-class-mapping=<infrastructure_storage_class>/<hosted_storage_class>,group=<group_name> --infra-volumesnapshot-class-mapping=<infrastructure_volume_snapshot_class>/<hosted_volume_snapshot_class>,group=<group_name> \\ 7 --infra-volumesnapshot-class-mapping=<infrastructure_volume_snapshot_class>/<hosted_volume_snapshot_class>,group=<group_name>",
"hcp create cluster kubevirt --name <hosted_cluster_name> \\ 1 --node-pool-replicas <worker_node_count> \\ 2 --pull-secret <path_to_pull_secret> \\ 3 --memory <memory> \\ 4 --cores <cpu> \\ 5 --root-volume-storage-class <root_volume_storage_class> \\ 6 --root-volume-size <volume_size> 7",
"hcp create cluster kubevirt --name <hosted_cluster_name> \\ 1 --node-pool-replicas <worker_node_count> \\ 2 --pull-secret <path_to_pull_secret> \\ 3 --memory <memory> \\ 4 --cores <cpu> \\ 5 --root-volume-cache-strategy=PVC 6",
"hcp create cluster kubevirt --name <hosted_cluster_name> \\ 1 --node-pool-replicas <worker_node_count> \\ 2 --pull-secret <path_to_pull_secret> \\ 3 --memory <memory> \\ 4 --cores <cpu> \\ 5 --etcd-storage-class=<etcd_storage_class_name> 6",
"delete managedcluster <cluster_name>",
"hcp destroy cluster kubevirt --name USDCLUSTER_NAME",
"apiVersion: v1 kind: ConfigMap metadata: name: custom-registries namespace: multicluster-engine labels: app: assisted-service data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- -----END CERTIFICATE----- registries.conf: | unqualified-search-registries = [\"registry.access.redhat.com\", \"docker.io\"] [[registry]] prefix = \"\" location = \"registry.redhat.io/openshift4\" mirror-by-digest-only = true [[registry.mirror]] location = \"registry.ocp-edge-cluster-0.qe.lab.redhat.com:5000/openshift4\" [[registry]] prefix = \"\" location = \"registry.redhat.io/rhacm2\" mirror-by-digest-only = true",
"adm release info <tagged_openshift_release_image> | grep \"Pull From\"",
"Pull From: quay.io/openshift-release-dev/ocp-release@sha256:69d1292f64a2b67227c5592c1a7d499c7d00376e498634ff8e1946bc9ccdddfe",
"get events -n hypershift",
"LAST SEEN TYPE REASON OBJECT MESSAGE 4m46s Warning ReconcileError deployment/operator Failed to ensure UWM telemetry remote write: cannot get telemeter client secret: Secret \"telemeter-client\" not found",
"kind: ConfigMap apiVersion: v1 metadata: name: hypershift-operator-install-flags namespace: local-cluster data: installFlagsToAdd: \"\" installFlagsToRemove: \"--enable-uwm-telemetry-remote-write\"",
"patch mce <multiclusterengine> --type=merge -p '{\"spec\":{\"overrides\":{\"components\":[{\"name\":\"hypershift\",\"enabled\": true}]}}}'",
"get managedclusteraddons -n local-cluster hypershift-addon",
"NAME AVAILABLE DEGRADED PROGRESSING hypershift-addon True False",
"wait --for=condition=Degraded=True managedclusteraddons/hypershift-addon -n local-cluster --timeout=5m",
"wait --for=condition=Available=True managedclusteraddons/hypershift-addon -n local-cluster --timeout=5m",
"edit addondeploymentconfig hypershift-addon-deploy-config -n multicluster-engine",
"apiVersion: addon.open-cluster-management.io/v1alpha1 kind: AddOnDeploymentConfig metadata: name: hypershift-addon-deploy-config namespace: multicluster-engine spec: nodePlacement: nodeSelector: node-role.kubernetes.io/infra: \"\" tolerations: - effect: NoSchedule key: node-role.kubernetes.io/infra operator: Exists",
"sudo dnf install dnsmasq radvd vim golang podman bind-utils net-tools httpd-tools tree htop strace tmux -y",
"systemctl enable --now podman",
"sudo yum -y install libvirt libvirt-daemon-driver-qemu qemu-kvm",
"sudo usermod -aG qemu,libvirt USD(id -un)",
"sudo newgrp libvirt",
"sudo systemctl enable --now libvirtd",
"sudo dnf -y copr enable karmab/kcli",
"sudo dnf -y install kcli",
"sudo kcli create pool -p /var/lib/libvirt/images default",
"kcli create host kvm -H 127.0.0.1 local",
"sudo setfacl -m u:USD(id -un):rwx /var/lib/libvirt/images",
"kcli create network -c 192.168.122.0/24 default",
"#!/bin/bash export IP=\"192.168.126.1\" 1 export BASE_RESOLV_CONF=\"/run/NetworkManager/resolv.conf\" if ! [[ `grep -q \"USDIP\" /etc/resolv.conf` ]]; then export TMP_FILE=USD(mktemp /etc/forcedns_resolv.conf.XXXXXX) cp USDBASE_RESOLV_CONF USDTMP_FILE chmod --reference=USDBASE_RESOLV_CONF USDTMP_FILE sed -i -e \"s/dns.base.domain.name//\" -e \"s/search /& dns.base.domain.name /\" -e \"0,/nameserver/s/nameserver/& USDIP\\n&/\" USDTMP_FILE 2 mv USDTMP_FILE /etc/resolv.conf fi echo \"ok\"",
"chmod 755 /etc/NetworkManager/dispatcher.d/forcedns",
"sudo dnf install python3-pyOpenSSL.noarch python3-cherrypy -y",
"kcli create sushy-service --ssl --port 9000",
"sudo systemctl daemon-reload",
"systemctl enable --now ksushy",
"systemctl status ksushy",
"sed -i s/^SELINUX=.*USD/SELINUX=permissive/ /etc/selinux/config; setenforce 0",
"systemctl disable --now firewalld",
"systemctl restart libvirtd",
"systemctl enable --now libvirtd",
"#!/usr/bin/env bash set -euo pipefail PRIMARY_NIC=USD(ls -1 /sys/class/net | grep -v podman | head -1) export PATH=/root/bin:USDPATH export PULL_SECRET=\"/root/baremetal/hub/openshift_pull.json\" 1 if [[ ! -f USDPULL_SECRET ]];then echo \"Pull Secret not found, exiting...\" exit 1 fi dnf -y install podman httpd httpd-tools jq skopeo libseccomp-devel export IP=USD(ip -o addr show USDPRIMARY_NIC | head -1 | awk '{print USD4}' | cut -d'/' -f1) REGISTRY_NAME=registry.USD(hostname --long) REGISTRY_USER=dummy REGISTRY_PASSWORD=dummy KEY=USD(echo -n USDREGISTRY_USER:USDREGISTRY_PASSWORD | base64) echo \"{\\\"auths\\\": {\\\"USDREGISTRY_NAME:5000\\\": {\\\"auth\\\": \\\"USDKEY\\\", \\\"email\\\": \\\"[email protected]\\\"}}}\" > /root/disconnected_pull.json mv USD{PULL_SECRET} /root/openshift_pull.json.old jq \".auths += {\\\"USDREGISTRY_NAME:5000\\\": {\\\"auth\\\": \\\"USDKEY\\\",\\\"email\\\": \\\"[email protected]\\\"}}\" < /root/openshift_pull.json.old > USDPULL_SECRET mkdir -p /opt/registry/{auth,certs,data,conf} cat <<EOF > /opt/registry/conf/config.yml version: 0.1 log: fields: service: registry storage: cache: blobdescriptor: inmemory filesystem: rootdirectory: /var/lib/registry delete: enabled: true http: addr: :5000 headers: X-Content-Type-Options: [nosniff] health: storagedriver: enabled: true interval: 10s threshold: 3 compatibility: schema1: enabled: true EOF openssl req -newkey rsa:4096 -nodes -sha256 -keyout /opt/registry/certs/domain.key -x509 -days 3650 -out /opt/registry/certs/domain.crt -subj \"/C=US/ST=Madrid/L=San Bernardo/O=Karmalabs/OU=Guitar/CN=USDREGISTRY_NAME\" -addext \"subjectAltName=DNS:USDREGISTRY_NAME\" cp /opt/registry/certs/domain.crt /etc/pki/ca-trust/source/anchors/ update-ca-trust extract htpasswd -bBc /opt/registry/auth/htpasswd USDREGISTRY_USER USDREGISTRY_PASSWORD create --name registry --net host --security-opt label=disable --replace -v /opt/registry/data:/var/lib/registry:z -v /opt/registry/auth:/auth:z -v /opt/registry/conf/config.yml:/etc/docker/registry/config.yml -e \"REGISTRY_AUTH=htpasswd\" -e \"REGISTRY_AUTH_HTPASSWD_REALM=Registry\" -e \"REGISTRY_HTTP_SECRET=ALongRandomSecretForRegistry\" -e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd -v /opt/registry/certs:/certs:z -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key docker.io/library/registry:latest [ \"USD?\" == \"0\" ] || !! systemctl enable --now registry",
"chmod u+x USD{HOME}/registry.sh",
"USD{HOME}/registry.sh",
"systemctl status",
"systemctl start",
"systemctl stop",
"kcli create network -c 192.168.125.0/24 -P dhcp=false -P dns=false --domain dns.base.domain.name ipv4",
"kcli list network Listing Networks +---------+--------+---------------------+-------+------------------+------+ | Network | Type | Cidr | Dhcp | Domain | Mode | +---------+--------+---------------------+-------+------------------+------+ | default | routed | 192.168.122.0/24 | True | default | nat | | ipv4 | routed | 192.168.125.0/24 | False | dns.base.domain.name | nat | | ipv6 | routed | 2620:52:0:1306::/64 | False | dns.base.domain.name | nat | +---------+--------+---------------------+-------+------------------+------+",
"kcli info network ipv4 Providing information about network ipv4 cidr: 192.168.125.0/24 dhcp: false domain: dns.base.domain.name mode: nat plan: kvirt type: routed",
"plan: hub-ipv4 force: true version: nightly tag: \"4.x.y-x86_64\" 1 cluster: \"hub-ipv4\" domain: dns.base.domain.name api_ip: 192.168.125.10 ingress_ip: 192.168.125.11 disconnected_url: registry.dns.base.domain.name:5000 disconnected_update: true disconnected_user: dummy disconnected_password: dummy disconnected_operators_version: v4.14 disconnected_operators: - name: metallb-operator - name: lvms-operator channels: - name: stable-4.14 disconnected_extra_images: - quay.io/user-name/trbsht:latest - quay.io/user-name/hypershift:BMSelfManage-v4.14-rc-v3 - registry.redhat.io/openshift4/ose-kube-rbac-proxy:v4.10 dualstack: false disk_size: 200 extra_disks: [200] memory: 48000 numcpus: 16 ctlplanes: 3 workers: 0 manifests: extra-manifests metal3: true network: ipv4 users_dev: developer users_devpassword: developer users_admin: admin users_adminpassword: admin metallb_pool: ipv4-virtual-network metallb_ranges: - 192.168.125.150-192.168.125.190 metallb_autoassign: true apps: - users - lvms-operator - metallb-operator vmrules: - hub-bootstrap: nets: - name: ipv4 mac: aa:aa:aa:aa:02:10 - hub-ctlplane-0: nets: - name: ipv4 mac: aa:aa:aa:aa:02:01 - hub-ctlplane-1: nets: - name: ipv4 mac: aa:aa:aa:aa:02:02 - hub-ctlplane-2: nets: - name: ipv4 mac: aa:aa:aa:aa:02:03",
"kcli create cluster openshift --pf mgmt-compact-hub-ipv4.yaml",
"adm -a USD{LOCAL_SECRET_JSON} release extract --command=openshift-install \"USD{LOCAL_REGISTRY}/USD{LOCAL_REPOSITORY}:USD{OCP_RELEASE}-USD{ARCHITECTURE}\"",
"#!/bin/bash WEBSRV_FOLDER=/opt/srv ROOTFS_IMG_URL=\"USD(./openshift-install coreos print-stream-json | jq -r '.architectures.x86_64.artifacts.metal.formats.pxe.rootfs.location')\" 1 LIVE_ISO_URL=\"USD(./openshift-install coreos print-stream-json | jq -r '.architectures.x86_64.artifacts.metal.formats.iso.disk.location')\" 2 mkdir -p USD{WEBSRV_FOLDER}/images curl -Lk USD{ROOTFS_IMG_URL} -o USD{WEBSRV_FOLDER}/images/USD{ROOTFS_IMG_URL##*/} curl -Lk USD{LIVE_ISO_URL} -o USD{WEBSRV_FOLDER}/images/USD{LIVE_ISO_URL##*/} chmod -R 755 USD{WEBSRV_FOLDER}/* ## Run Webserver ps --noheading | grep -q websrv-ai if [[ USD? == 0 ]];then echo \"Launching Registry pod...\" /usr/bin/podman run --name websrv-ai --net host -v /opt/srv:/usr/local/apache2/htdocs:z quay.io/alosadag/httpd:p8080 fi",
"apiVersion: mirror.openshift.io/v1alpha2 kind: ImageSetConfiguration storageConfig: registry: imageURL: registry.dns.base.domain.name:5000/openshift/release/metadata:latest 1 mirror: platform: channels: - name: candidate-4.14 minVersion: 4.x.y-x86_64 2 maxVersion: 4.x.y-x86_64 type: ocp graph: true additionalImages: - name: quay.io/karmab/origin-keepalived-ipfailover:latest - name: quay.io/karmab/kubectl:latest - name: quay.io/karmab/haproxy:latest - name: quay.io/karmab/mdns-publisher:latest - name: quay.io/karmab/origin-coredns:latest - name: quay.io/karmab/curl:latest - name: quay.io/karmab/kcli:latest - name: quay.io/user-name/trbsht:latest - name: quay.io/user-name/hypershift:BMSelfManage-v4.14-rc-v3 - name: registry.redhat.io/openshift4/ose-kube-rbac-proxy:v4.10 operators: - catalog: registry.redhat.io/redhat/redhat-operator-index:v4.14 packages: - name: lvms-operator - name: local-storage-operator - name: odf-csi-addons-operator - name: odf-operator - name: mcg-operator - name: ocs-operator - name: metallb-operator - name: kubevirt-hyperconverged",
"oc-mirror --source-skip-tls --config imagesetconfig.yaml docker://USD{REGISTRY}",
"REGISTRY=registry.USD(hostname --long):5000 adm release mirror --from=registry.ci.openshift.org/ocp/release:4.x.y-x86_64 --to=USD{REGISTRY}/openshift/release --to-release-image=USD{REGISTRY}/openshift/release-images:4.x.y-x86_64",
"get packagemanifest",
"apply -f oc-mirror-workspace/results-XXXXXX/imageContentSourcePolicy.yaml",
"apply -f catalogSource-XXXXXXXX-index.yaml",
"--- apiVersion: v1 kind: ConfigMap metadata: name: custom-registries namespace: multicluster-engine labels: app: assisted-service data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- -----END CERTIFICATE----- registries.conf: | unqualified-search-registries = [\"registry.access.redhat.com\", \"docker.io\"] [[registry]] prefix = \"\" location = \"registry.redhat.io/openshift4\" mirror-by-digest-only = true [[registry.mirror]] location = \"registry.dns.base.domain.name:5000/openshift4\" 1 [[registry]] prefix = \"\" location = \"registry.redhat.io/rhacm2\" mirror-by-digest-only = true",
"--- apiVersion: agent-install.openshift.io/v1beta1 kind: AgentServiceConfig metadata: annotations: unsupported.agent-install.openshift.io/assisted-service-configmap: assisted-service-config 1 name: agent namespace: multicluster-engine spec: mirrorRegistryRef: name: custom-registries 2 databaseStorage: storageClassName: lvms-vg1 accessModes: - ReadWriteOnce resources: requests: storage: 10Gi filesystemStorage: storageClassName: lvms-vg1 accessModes: - ReadWriteOnce resources: requests: storage: 20Gi osImages: 3 - cpuArchitecture: x86_64 openshiftVersion: \"4.14\" rootFSUrl: http://registry.dns.base.domain.name:8080/images/rhcos-414.92.202308281054-0-live-rootfs.x86_64.img 4 url: http://registry.dns.base.domain.name:8080/images/rhcos-414.92.202308281054-0-live.x86_64.iso version: 414.92.202308281054-0",
"apply -f agentServiceConfig.yaml",
"assisted-image-service-0 1/1 Running 2 11d 1 assisted-service-668b49548-9m7xw 2/2 Running 5 11d 2",
"## REGISTRY_CERT_PATH=<PATH/TO/YOUR/CERTIFICATE/FILE> export REGISTRY_CERT_PATH=/opt/registry/certs/domain.crt create configmap user-ca-bundle -n openshift-config --from-file=ca-bundle.crt=USD{REGISTRY_CERT_PATH}",
"## REGISTRY_CERT_PATH=<PATH/TO/YOUR/CERTIFICATE/FILE> export REGISTRY_CERT_PATH=/opt/registry/certs/domain.crt export TMP_FILE=USD(mktemp) get cm -n openshift-config user-ca-bundle -ojsonpath='{.data.ca-bundle\\.crt}' > USD{TMP_FILE} echo >> USD{TMP_FILE} echo \\#registry.USD(hostname --long) >> USD{TMP_FILE} cat USD{REGISTRY_CERT_PATH} >> USD{TMP_FILE} create configmap user-ca-bundle -n openshift-config --from-file=ca-bundle.crt=USD{TMP_FILE} --dry-run=client -o yaml | kubectl apply -f -",
"--- apiVersion: v1 kind: Namespace metadata: creationTimestamp: null name: clusters-hosted-ipv4 spec: {} status: {} --- apiVersion: v1 kind: Namespace metadata: creationTimestamp: null name: clusters spec: {} status: {}",
"--- apiVersion: v1 data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- -----END CERTIFICATE----- kind: ConfigMap metadata: name: user-ca-bundle namespace: clusters --- apiVersion: v1 data: .dockerconfigjson: xxxxxxxxx kind: Secret metadata: creationTimestamp: null name: hosted-ipv4-pull-secret namespace: clusters --- apiVersion: v1 kind: Secret metadata: name: sshkey-cluster-hosted-ipv4 namespace: clusters stringData: id_rsa.pub: ssh-rsa xxxxxxxxx --- apiVersion: v1 data: key: nTPtVBEt03owkrKhIdmSW8jrWRxU57KO/fnZa8oaG0Y= kind: Secret metadata: creationTimestamp: null name: hosted-ipv4-etcd-encryption-key namespace: clusters type: Opaque",
"apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: creationTimestamp: null name: capi-provider-role namespace: clusters-hosted-ipv4 rules: - apiGroups: - agent-install.openshift.io resources: - agents verbs: - '*'",
"apiVersion: hypershift.openshift.io/v1beta1 kind: HostedCluster metadata: name: hosted-ipv4 namespace: clusters spec: additionalTrustBundle: name: \"user-ca-bundle\" olmCatalogPlacement: guest imageContentSources: 1 - source: quay.io/openshift-release-dev/ocp-v4.0-art-dev mirrors: - registry.dns.base.domain.name:5000/openshift/release - source: quay.io/openshift-release-dev/ocp-release mirrors: - registry.dns.base.domain.name:5000/openshift/release-images - mirrors: autoscaling: {} controllerAvailabilityPolicy: SingleReplica dns: baseDomain: dns.base.domain.name etcd: managed: storage: persistentVolume: size: 8Gi restoreSnapshotURL: null type: PersistentVolume managementType: Managed fips: false networking: clusterNetwork: - cidr: 10.132.0.0/14 networkType: OVNKubernetes serviceNetwork: - cidr: 172.31.0.0/16 platform: agent: agentNamespace: clusters-hosted-ipv4 type: Agent pullSecret: name: hosted-ipv4-pull-secret release: image: registry.dns.base.domain.name:5000/openshift/release-images:4.x.y-x86_64 secretEncryption: aescbc: activeKey: name: hosted-ipv4-etcd-encryption-key type: aescbc services: - service: APIServer servicePublishingStrategy: nodePort: address: api.hosted-ipv4.dns.base.domain.name type: NodePort - service: OAuthServer servicePublishingStrategy: nodePort: address: api.hosted-ipv4.dns.base.domain.name type: NodePort - service: OIDC servicePublishingStrategy: nodePort: address: api.hosted-ipv4.dns.base.domain.name type: NodePort - service: Konnectivity servicePublishingStrategy: nodePort: address: api.hosted-ipv4.dns.base.domain.name type: NodePort - service: Ignition servicePublishingStrategy: nodePort: address: api.hosted-ipv4.dns.base.domain.name type: NodePort sshKey: name: sshkey-cluster-hosted-ipv4 status: controlPlaneEndpoint: host: \"\" port: 0",
"adm release info registry.dns.base.domain.name:5000/openshift-release-dev/ocp-release:4.x.y-x86_64 | grep hypershift",
"hypershift sha256:31149e3e5f8c5e5b5b100ff2d89975cf5f7a73801b2c06c639bf6648766117f8",
"pull registry.dns.base.domain.name:5000/openshift-release-dev/ocp-v4.0-art-dev@sha256:31149e3e5f8c5e5b5b100ff2d89975cf5f7a73801b2c06c639bf6648766117f8",
"pull registry.dns.base.domain.name:5000/openshift/release@sha256:31149e3e5f8c5e5b5b100ff2d89975cf5f7a73801b2c06c639bf6648766117f8 Trying to pull registry.dns.base.domain.name:5000/openshift/release@sha256:31149e3e5f8c5e5b5b100ff2d89975cf5f7a73801b2c06c639bf6648766117f8 Getting image source signatures Copying blob d8190195889e skipped: already exists Copying blob c71d2589fba7 skipped: already exists Copying blob d4dc6e74b6ce skipped: already exists Copying blob 97da74cc6d8f skipped: already exists Copying blob b70007a560c9 done Copying config 3a62961e6e done Writing manifest to image destination Storing signatures 3a62961e6ed6edab46d5ec8429ff1f41d6bb68de51271f037c6cb8941a007fde",
"apply -f 01-4.14-hosted_cluster-nodeport.yaml",
"NAME READY STATUS RESTARTS AGE capi-provider-5b57dbd6d5-pxlqc 1/1 Running 0 3m57s catalog-operator-9694884dd-m7zzv 2/2 Running 0 93s cluster-api-f98b9467c-9hfrq 1/1 Running 0 3m57s cluster-autoscaler-d7f95dd5-d8m5d 1/1 Running 0 93s cluster-image-registry-operator-5ff5944b4b-648ht 1/2 Running 0 93s cluster-network-operator-77b896ddc-wpkq8 1/1 Running 0 94s cluster-node-tuning-operator-84956cd484-4hfgf 1/1 Running 0 94s cluster-policy-controller-5fd8595d97-rhbwf 1/1 Running 0 95s cluster-storage-operator-54dcf584b5-xrnts 1/1 Running 0 93s cluster-version-operator-9c554b999-l22s7 1/1 Running 0 95s control-plane-operator-6fdc9c569-t7hr4 1/1 Running 0 3m57s csi-snapshot-controller-785c6dc77c-8ljmr 1/1 Running 0 77s csi-snapshot-controller-operator-7c6674bc5b-d9dtp 1/1 Running 0 93s csi-snapshot-webhook-5b8584875f-2492j 1/1 Running 0 77s dns-operator-6874b577f-9tc6b 1/1 Running 0 94s etcd-0 3/3 Running 0 3m39s hosted-cluster-config-operator-f5cf5c464-4nmbh 1/1 Running 0 93s ignition-server-6b689748fc-zdqzk 1/1 Running 0 95s ignition-server-proxy-54d4bb9b9b-6zkg7 1/1 Running 0 95s ingress-operator-6548dc758b-f9gtg 1/2 Running 0 94s konnectivity-agent-7767cdc6f5-tw782 1/1 Running 0 95s kube-apiserver-7b5799b6c8-9f5bp 4/4 Running 0 3m7s kube-controller-manager-5465bc4dd6-zpdlk 1/1 Running 0 44s kube-scheduler-5dd5f78b94-bbbck 1/1 Running 0 2m36s machine-approver-846c69f56-jxvfr 1/1 Running 0 92s oauth-openshift-79c7bf44bf-j975g 2/2 Running 0 62s olm-operator-767f9584c-4lcl2 2/2 Running 0 93s openshift-apiserver-5d469778c6-pl8tj 3/3 Running 0 2m36s openshift-controller-manager-6475fdff58-hl4f7 1/1 Running 0 95s openshift-oauth-apiserver-dbbc5cc5f-98574 2/2 Running 0 95s openshift-route-controller-manager-5f6997b48f-s9vdc 1/1 Running 0 95s packageserver-67c87d4d4f-kl7qh 2/2 Running 0 93s",
"NAMESPACE NAME VERSION KUBECONFIG PROGRESS AVAILABLE PROGRESSING MESSAGE clusters hosted-ipv4 hosted-admin-kubeconfig Partial True False The hosted control plane is available",
"apiVersion: hypershift.openshift.io/v1beta1 kind: NodePool metadata: creationTimestamp: null name: hosted-ipv4 namespace: clusters spec: arch: amd64 clusterName: hosted-ipv4 management: autoRepair: false 1 upgradeType: InPlace 2 nodeDrainTimeout: 0s platform: type: Agent release: image: registry.dns.base.domain.name:5000/openshift/release-images:4.x.y-x86_64 3 replicas: 0 status: replicas: 0 4",
"apply -f 02-nodepool.yaml",
"NAMESPACE NAME CLUSTER DESIRED NODES CURRENT NODES AUTOSCALING AUTOREPAIR VERSION UPDATINGVERSION UPDATINGCONFIG MESSAGE clusters hosted-ipv4 hosted 0 False False 4.x.y-x86_64",
"--- apiVersion: agent-install.openshift.io/v1beta1 kind: InfraEnv metadata: name: hosted-ipv4 namespace: clusters-hosted-ipv4 spec: pullSecretRef: 1 name: pull-secret sshAuthorizedKey: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDk7ICaUE+/k4zTpxLk4+xFdHi4ZuDi5qjeF52afsNkw0w/glILHhwpL5gnp5WkRuL8GwJuZ1VqLC9EKrdmegn4MrmUlq7WTsP0VFOZFBfq2XRUxo1wrRdor2z0Bbh93ytR+ZsDbbLlGngXaMa0Vbt+z74FqlcajbHTZ6zBmTpBVq5RHtDPgKITdpE1fongp7+ZXQNBlkaavaqv8bnyrP4BWahLP4iO9/xJF9lQYboYwEEDzmnKLMW1VtCE6nJzEgWCufACTbxpNS7GvKtoHT/OVzw8ArEXhZXQUS1UY8zKsX2iXwmyhw5Sj6YboA8WICs4z+TrFP89LmxXY0j6536TQFyRz1iB4WWvCbH5n6W+ABV2e8ssJB1AmEy8QYNwpJQJNpSxzoKBjI73XxvPYYC/IjPFMySwZqrSZCkJYqQ023ySkaQxWZT7in4KeMu7eS2tC+Kn4deJ7KwwUycx8n6RHMeD8Qg9flTHCv3gmab8JKZJqN3hW1D378JuvmIX4V0= 2",
"apply -f 03-infraenv.yaml",
"NAMESPACE NAME ISO CREATED AT clusters-hosted-ipv4 hosted 2023-09-11T15:14:10Z",
"kcli delete plan hosted-ipv4",
"kcli create vm -P start=False -P uefi_legacy=true -P plan=hosted-ipv4 -P memory=8192 -P numcpus=16 -P disks=[200,200] -P nets=[\"{\\\"name\\\": \\\"ipv4\\\", \\\"mac\\\": \\\"aa:aa:aa:aa:02:11\\\"}\"] -P uuid=aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaa0211 -P name=hosted-ipv4-worker0",
"kcli create vm -P start=False -P uefi_legacy=true -P plan=hosted-ipv4 -P memory=8192 -P numcpus=16 -P disks=[200,200] -P nets=[\"{\\\"name\\\": \\\"ipv4\\\", \\\"mac\\\": \\\"aa:aa:aa:aa:02:12\\\"}\"] -P uuid=aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaa0212 -P name=hosted-ipv4-worker1",
"kcli create vm -P start=False -P uefi_legacy=true -P plan=hosted-ipv4 -P memory=8192 -P numcpus=16 -P disks=[200,200] -P nets=[\"{\\\"name\\\": \\\"ipv4\\\", \\\"mac\\\": \\\"aa:aa:aa:aa:02:13\\\"}\"] -P uuid=aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaa0213 -P name=hosted-ipv4-worker2",
"systemctl restart ksushy",
"+---------------------+--------+-------------------+----------------------------------------------------+-------------+---------+ | Name | Status | Ip | Source | Plan | Profile | +---------------------+--------+-------------------+----------------------------------------------------+-------------+---------+ | hosted-worker0 | down | | | hosted-ipv4 | kvirt | | hosted-worker1 | down | | | hosted-ipv4 | kvirt | | hosted-worker2 | down | | | hosted-ipv4 | kvirt | +---------------------+--------+-------------------+----------------------------------------------------+-------------+---------+",
"--- apiVersion: v1 kind: Secret metadata: name: hosted-ipv4-worker0-bmc-secret namespace: clusters-hosted-ipv4 data: password: YWRtaW4= username: YWRtaW4= type: Opaque --- apiVersion: metal3.io/v1alpha1 kind: BareMetalHost metadata: name: hosted-ipv4-worker0 namespace: clusters-hosted-ipv4 labels: infraenvs.agent-install.openshift.io: hosted-ipv4 1 annotations: inspect.metal3.io: disabled bmac.agent-install.openshift.io/hostname: hosted-ipv4-worker0 2 spec: automatedCleaningMode: disabled 3 bmc: disableCertificateVerification: true 4 address: redfish-virtualmedia://[192.168.125.1]:9000/redfish/v1/Systems/local/hosted-ipv4-worker0 5 credentialsName: hosted-ipv4-worker0-bmc-secret 6 bootMACAddress: aa:aa:aa:aa:02:11 7 online: true 8",
"apply -f 04-bmh.yaml",
"NAMESPACE NAME STATE CONSUMER ONLINE ERROR AGE clusters-hosted hosted-worker0 registering true 2s clusters-hosted hosted-worker1 registering true 2s clusters-hosted hosted-worker2 registering true 2s",
"NAMESPACE NAME STATE CONSUMER ONLINE ERROR AGE clusters-hosted hosted-worker0 provisioning true 16s clusters-hosted hosted-worker1 provisioning true 16s clusters-hosted hosted-worker2 provisioning true 16s",
"NAMESPACE NAME STATE CONSUMER ONLINE ERROR AGE clusters-hosted hosted-worker0 provisioned true 67s clusters-hosted hosted-worker1 provisioned true 67s clusters-hosted hosted-worker2 provisioned true 67s",
"NAMESPACE NAME CLUSTER APPROVED ROLE STAGE clusters-hosted aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaa0411 true auto-assign clusters-hosted aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaa0412 true auto-assign clusters-hosted aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaa0413 true auto-assign",
"-n clusters scale nodepool hosted-ipv4 --replicas 3",
"NAMESPACE NAME CLUSTER APPROVED ROLE STAGE clusters-hosted aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaa0411 hosted true auto-assign clusters-hosted aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaa0412 hosted true auto-assign clusters-hosted aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaa0413 hosted true auto-assign",
"NAMESPACE NAME CLUSTER DESIRED NODES CURRENT NODES AUTOSCALING AUTOREPAIR VERSION UPDATINGVERSION UPDATINGCONFIG MESSAGE clusters hosted hosted 3 False False 4.x.y-x86_64 Minimum availability requires 3 replicas, current 0 available",
"export KUBECONFIG=/root/.kcli/clusters/hub-ipv4/auth/kubeconfig",
"watch \"oc get pod -n hypershift;echo;echo;oc get pod -n clusters-hosted-ipv4;echo;echo;oc get bmh -A;echo;echo;oc get agent -A;echo;echo;oc get infraenv -A;echo;echo;oc get hostedcluster -A;echo;echo;oc get nodepool -A;echo;echo;\"",
"get secret -n clusters-hosted-ipv4 admin-kubeconfig -o jsonpath='{.data.kubeconfig}' |base64 -d > /root/hc_admin_kubeconfig.yaml",
"export KUBECONFIG=/root/hc_admin_kubeconfig.yaml",
"watch \"oc get clusterversion,nodes,co\"",
"sudo dnf install dnsmasq radvd vim golang podman bind-utils net-tools httpd-tools tree htop strace tmux -y",
"systemctl enable --now podman",
"sudo yum -y install libvirt libvirt-daemon-driver-qemu qemu-kvm",
"sudo usermod -aG qemu,libvirt USD(id -un)",
"sudo newgrp libvirt",
"sudo systemctl enable --now libvirtd",
"sudo dnf -y copr enable karmab/kcli",
"sudo dnf -y install kcli",
"sudo kcli create pool -p /var/lib/libvirt/images default",
"kcli create host kvm -H 127.0.0.1 local",
"sudo setfacl -m u:USD(id -un):rwx /var/lib/libvirt/images",
"kcli create network -c 192.168.122.0/24 default",
"#!/bin/bash export IP=\"2620:52:0:1306::1\" 1 export BASE_RESOLV_CONF=\"/run/NetworkManager/resolv.conf\" if ! [[ `grep -q \"USDIP\" /etc/resolv.conf` ]]; then export TMP_FILE=USD(mktemp /etc/forcedns_resolv.conf.XXXXXX) cp USDBASE_RESOLV_CONF USDTMP_FILE chmod --reference=USDBASE_RESOLV_CONF USDTMP_FILE sed -i -e \"s/dns.base.domain.name//\" -e \"s/search /& dns.base.domain.name /\" -e \"0,/nameserver/s/nameserver/& USDIP\\n&/\" USDTMP_FILE 2 mv USDTMP_FILE /etc/resolv.conf fi echo \"ok\"",
"chmod 755 /etc/NetworkManager/dispatcher.d/forcedns",
"sudo dnf install python3-pyOpenSSL.noarch python3-cherrypy -y",
"kcli create sushy-service --ssl --ipv6 --port 9000",
"sudo systemctl daemon-reload",
"systemctl enable --now ksushy",
"systemctl status ksushy",
"sed -i s/^SELINUX=.*USD/SELINUX=permissive/ /etc/selinux/config; setenforce 0",
"systemctl disable --now firewalld",
"systemctl restart libvirtd",
"systemctl enable --now libvirtd",
"#!/usr/bin/env bash set -euo pipefail PRIMARY_NIC=USD(ls -1 /sys/class/net | grep -v podman | head -1) export PATH=/root/bin:USDPATH export PULL_SECRET=\"/root/baremetal/hub/openshift_pull.json\" 1 if [[ ! -f USDPULL_SECRET ]];then echo \"Pull Secret not found, exiting...\" exit 1 fi dnf -y install podman httpd httpd-tools jq skopeo libseccomp-devel export IP=USD(ip -o addr show USDPRIMARY_NIC | head -1 | awk '{print USD4}' | cut -d'/' -f1) REGISTRY_NAME=registry.USD(hostname --long) REGISTRY_USER=dummy REGISTRY_PASSWORD=dummy KEY=USD(echo -n USDREGISTRY_USER:USDREGISTRY_PASSWORD | base64) echo \"{\\\"auths\\\": {\\\"USDREGISTRY_NAME:5000\\\": {\\\"auth\\\": \\\"USDKEY\\\", \\\"email\\\": \\\"[email protected]\\\"}}}\" > /root/disconnected_pull.json mv USD{PULL_SECRET} /root/openshift_pull.json.old jq \".auths += {\\\"USDREGISTRY_NAME:5000\\\": {\\\"auth\\\": \\\"USDKEY\\\",\\\"email\\\": \\\"[email protected]\\\"}}\" < /root/openshift_pull.json.old > USDPULL_SECRET mkdir -p /opt/registry/{auth,certs,data,conf} cat <<EOF > /opt/registry/conf/config.yml version: 0.1 log: fields: service: registry storage: cache: blobdescriptor: inmemory filesystem: rootdirectory: /var/lib/registry delete: enabled: true http: addr: :5000 headers: X-Content-Type-Options: [nosniff] health: storagedriver: enabled: true interval: 10s threshold: 3 compatibility: schema1: enabled: true EOF openssl req -newkey rsa:4096 -nodes -sha256 -keyout /opt/registry/certs/domain.key -x509 -days 3650 -out /opt/registry/certs/domain.crt -subj \"/C=US/ST=Madrid/L=San Bernardo/O=Karmalabs/OU=Guitar/CN=USDREGISTRY_NAME\" -addext \"subjectAltName=DNS:USDREGISTRY_NAME\" cp /opt/registry/certs/domain.crt /etc/pki/ca-trust/source/anchors/ update-ca-trust extract htpasswd -bBc /opt/registry/auth/htpasswd USDREGISTRY_USER USDREGISTRY_PASSWORD create --name registry --net host --security-opt label=disable --replace -v /opt/registry/data:/var/lib/registry:z -v /opt/registry/auth:/auth:z -v /opt/registry/conf/config.yml:/etc/docker/registry/config.yml -e \"REGISTRY_AUTH=htpasswd\" -e \"REGISTRY_AUTH_HTPASSWD_REALM=Registry\" -e \"REGISTRY_HTTP_SECRET=ALongRandomSecretForRegistry\" -e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd -v /opt/registry/certs:/certs:z -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key docker.io/library/registry:latest [ \"USD?\" == \"0\" ] || !! systemctl enable --now registry",
"chmod u+x USD{HOME}/registry.sh",
"USD{HOME}/registry.sh",
"systemctl status",
"systemctl start",
"systemctl stop",
"kcli create network -c 2620:52:0:1305::0/64 -P dhcp=false -P dns=false --domain dns.base.domain.name --nodhcp ipv6",
"kcli list network Listing Networks +---------+--------+---------------------+-------+------------------+------+ | Network | Type | Cidr | Dhcp | Domain | Mode | +---------+--------+---------------------+-------+------------------+------+ | default | routed | 192.168.122.0/24 | True | default | nat | | ipv4 | routed | 192.168.125.0/24 | False | dns.base.domain.name | nat | | ipv4 | routed | 2620:52:0:1305::/64 | False | dns.base.domain.name | nat | +---------+--------+---------------------+-------+------------------+------+",
"kcli info network ipv6 Providing information about network ipv6 cidr: 2620:52:0:1305::/64 dhcp: false domain: dns.base.domain.name mode: nat plan: kvirt type: routed",
"plan: hub-ipv6 force: true version: nightly tag: \"4.x.y-x86_64\" cluster: \"hub-ipv6\" ipv6: true domain: dns.base.domain.name api_ip: 2620:52:0:1305::2 ingress_ip: 2620:52:0:1305::3 disconnected_url: registry.dns.base.domain.name:5000 disconnected_update: true disconnected_user: dummy disconnected_password: dummy disconnected_operators_version: v4.14 disconnected_operators: - name: metallb-operator - name: lvms-operator channels: - name: stable-4.14 disconnected_extra_images: - quay.io/user-name/trbsht:latest - quay.io/user-name/hypershift:BMSelfManage-v4.14-rc-v3 - registry.redhat.io/openshift4/ose-kube-rbac-proxy:v4.10 dualstack: false disk_size: 200 extra_disks: [200] memory: 48000 numcpus: 16 ctlplanes: 3 workers: 0 manifests: extra-manifests metal3: true network: ipv6 users_dev: developer users_devpassword: developer users_admin: admin users_adminpassword: admin metallb_pool: ipv6-virtual-network metallb_ranges: - 2620:52:0:1305::150-2620:52:0:1305::190 metallb_autoassign: true apps: - users - lvms-operator - metallb-operator vmrules: - hub-bootstrap: nets: - name: ipv6 mac: aa:aa:aa:aa:03:10 - hub-ctlplane-0: nets: - name: ipv6 mac: aa:aa:aa:aa:03:01 - hub-ctlplane-1: nets: - name: ipv6 mac: aa:aa:aa:aa:03:02 - hub-ctlplane-2: nets: - name: ipv6 mac: aa:aa:aa:aa:03:03",
"kcli create cluster openshift --pf mgmt-compact-hub-ipv6.yaml",
"adm -a USD{LOCAL_SECRET_JSON} release extract --command=openshift-install \"USD{LOCAL_REGISTRY}/USD{LOCAL_REPOSITORY}:USD{OCP_RELEASE}-USD{ARCHITECTURE}\"",
"#!/bin/bash WEBSRV_FOLDER=/opt/srv ROOTFS_IMG_URL=\"USD(./openshift-install coreos print-stream-json | jq -r '.architectures.x86_64.artifacts.metal.formats.pxe.rootfs.location')\" 1 LIVE_ISO_URL=\"USD(./openshift-install coreos print-stream-json | jq -r '.architectures.x86_64.artifacts.metal.formats.iso.disk.location')\" 2 mkdir -p USD{WEBSRV_FOLDER}/images curl -Lk USD{ROOTFS_IMG_URL} -o USD{WEBSRV_FOLDER}/images/USD{ROOTFS_IMG_URL##*/} curl -Lk USD{LIVE_ISO_URL} -o USD{WEBSRV_FOLDER}/images/USD{LIVE_ISO_URL##*/} chmod -R 755 USD{WEBSRV_FOLDER}/* ## Run Webserver ps --noheading | grep -q websrv-ai if [[ USD? == 0 ]];then echo \"Launching Registry pod...\" /usr/bin/podman run --name websrv-ai --net host -v /opt/srv:/usr/local/apache2/htdocs:z quay.io/alosadag/httpd:p8080 fi",
"apiVersion: mirror.openshift.io/v1alpha2 kind: ImageSetConfiguration storageConfig: registry: imageURL: registry.dns.base.domain.name:5000/openshift/release/metadata:latest 1 mirror: platform: channels: - name: candidate-4.14 minVersion: 4.x.y-x86_64 maxVersion: 4.x.y-x86_64 type: ocp graph: true additionalImages: - name: quay.io/karmab/origin-keepalived-ipfailover:latest - name: quay.io/karmab/kubectl:latest - name: quay.io/karmab/haproxy:latest - name: quay.io/karmab/mdns-publisher:latest - name: quay.io/karmab/origin-coredns:latest - name: quay.io/karmab/curl:latest - name: quay.io/karmab/kcli:latest - name: quay.io/user-name/trbsht:latest - name: quay.io/user-name/hypershift:BMSelfManage-v4.14-rc-v3 - name: registry.redhat.io/openshift4/ose-kube-rbac-proxy:v4.10 operators: - catalog: registry.redhat.io/redhat/redhat-operator-index:v4.14 packages: - name: lvms-operator - name: local-storage-operator - name: odf-csi-addons-operator - name: odf-operator - name: mcg-operator - name: ocs-operator - name: metallb-operator",
"oc-mirror --source-skip-tls --config imagesetconfig.yaml docker://USD{REGISTRY}",
"REGISTRY=registry.USD(hostname --long):5000 adm release mirror --from=registry.ci.openshift.org/ocp/release:4.x.y-x86_64 --to=USD{REGISTRY}/openshift/release --to-release-image=USD{REGISTRY}/openshift/release-images:4.x.y-x86_64",
"get packagemanifest",
"apply -f oc-mirror-workspace/results-XXXXXX/imageContentSourcePolicy.yaml",
"apply -f catalogSource-XXXXXXXX-index.yaml",
"--- apiVersion: v1 kind: ConfigMap metadata: name: custom-registries namespace: multicluster-engine labels: app: assisted-service data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- -----END CERTIFICATE----- registries.conf: | unqualified-search-registries = [\"registry.access.redhat.com\", \"docker.io\"] [[registry]] prefix = \"\" location = \"registry.redhat.io/openshift4\" mirror-by-digest-only = true [[registry.mirror]] location = \"registry.dns.base.domain.name:5000/openshift4\" 1 [[registry]] prefix = \"\" location = \"registry.redhat.io/rhacm2\" mirror-by-digest-only = true",
"--- apiVersion: agent-install.openshift.io/v1beta1 kind: AgentServiceConfig metadata: annotations: unsupported.agent-install.openshift.io/assisted-service-configmap: assisted-service-config 1 name: agent namespace: multicluster-engine spec: mirrorRegistryRef: name: custom-registries 2 databaseStorage: storageClassName: lvms-vg1 accessModes: - ReadWriteOnce resources: requests: storage: 10Gi filesystemStorage: storageClassName: lvms-vg1 accessModes: - ReadWriteOnce resources: requests: storage: 20Gi osImages: 3 - cpuArchitecture: x86_64 openshiftVersion: \"4.14\" rootFSUrl: http://registry.dns.base.domain.name:8080/images/rhcos-414.92.202308281054-0-live-rootfs.x86_64.img 4 url: http://registry.dns.base.domain.name:8080/images/rhcos-414.92.202308281054-0-live.x86_64.iso version: 414.92.202308281054-0",
"apply -f agentServiceConfig.yaml",
"assisted-image-service-0 1/1 Running 2 11d 1 assisted-service-668b49548-9m7xw 2/2 Running 5 11d 2",
"## REGISTRY_CERT_PATH=<PATH/TO/YOUR/CERTIFICATE/FILE> export REGISTRY_CERT_PATH=/opt/registry/certs/domain.crt create configmap user-ca-bundle -n openshift-config --from-file=ca-bundle.crt=USD{REGISTRY_CERT_PATH}",
"## REGISTRY_CERT_PATH=<PATH/TO/YOUR/CERTIFICATE/FILE> export REGISTRY_CERT_PATH=/opt/registry/certs/domain.crt export TMP_FILE=USD(mktemp) get cm -n openshift-config user-ca-bundle -ojsonpath='{.data.ca-bundle\\.crt}' > USD{TMP_FILE} echo >> USD{TMP_FILE} echo \\#registry.USD(hostname --long) >> USD{TMP_FILE} cat USD{REGISTRY_CERT_PATH} >> USD{TMP_FILE} create configmap user-ca-bundle -n openshift-config --from-file=ca-bundle.crt=USD{TMP_FILE} --dry-run=client -o yaml | kubectl apply -f -",
"--- apiVersion: v1 kind: Namespace metadata: creationTimestamp: null name: clusters-hosted-ipv6 spec: {} status: {} --- apiVersion: v1 kind: Namespace metadata: creationTimestamp: null name: clusters spec: {} status: {}",
"--- apiVersion: v1 data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- -----END CERTIFICATE----- kind: ConfigMap metadata: name: user-ca-bundle namespace: clusters --- apiVersion: v1 data: .dockerconfigjson: xxxxxxxxx kind: Secret metadata: creationTimestamp: null name: hosted-ipv6-pull-secret namespace: clusters --- apiVersion: v1 kind: Secret metadata: name: sshkey-cluster-hosted-ipv6 namespace: clusters stringData: id_rsa.pub: ssh-rsa xxxxxxxxx --- apiVersion: v1 data: key: nTPtVBEt03owkrKhIdmSW8jrWRxU57KO/fnZa8oaG0Y= kind: Secret metadata: creationTimestamp: null name: hosted-ipv6-etcd-encryption-key namespace: clusters type: Opaque",
"apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: creationTimestamp: null name: capi-provider-role namespace: clusters-hosted-ipv6 rules: - apiGroups: - agent-install.openshift.io resources: - agents verbs: - '*'",
"apiVersion: hypershift.openshift.io/v1beta1 kind: HostedCluster metadata: name: hosted-ipv6 namespace: clusters annotations: hypershift.openshift.io/control-plane-operator-image: registry.ocp-edge-cluster-0.qe.lab.redhat.com:5005/openshift/release@sha256:31149e3e5f8c5e5b5b100ff2d89975cf5f7a73801b2c06c639bf6648766117f8 spec: additionalTrustBundle: name: \"user-ca-bundle\" olmCatalogPlacement: guest imageContentSources: 1 - source: quay.io/openshift-release-dev/ocp-v4.0-art-dev mirrors: - registry.dns.base.domain.name:5000/openshift/release - source: quay.io/openshift-release-dev/ocp-release mirrors: - registry.dns.base.domain.name:5000/openshift/release-images - mirrors: autoscaling: {} controllerAvailabilityPolicy: SingleReplica dns: baseDomain: dns.base.domain.name etcd: managed: storage: persistentVolume: size: 8Gi restoreSnapshotURL: null type: PersistentVolume managementType: Managed fips: false networking: clusterNetwork: - cidr: 10.132.0.0/14 networkType: OVNKubernetes serviceNetwork: - cidr: 172.31.0.0/16 platform: agent: agentNamespace: clusters-hosted-ipv6 type: Agent pullSecret: name: hosted-ipv6-pull-secret release: image: registry.dns.base.domain.name:5000/openshift/release-images:4.x.y-x86_64 secretEncryption: aescbc: activeKey: name: hosted-ipv6-etcd-encryption-key type: aescbc services: - service: APIServer servicePublishingStrategy: nodePort: address: api.hosted-ipv6.dns.base.domain.name type: NodePort - service: OAuthServer servicePublishingStrategy: nodePort: address: api.hosted-ipv6.dns.base.domain.name type: NodePort - service: OIDC servicePublishingStrategy: nodePort: address: api.hosted-ipv6.dns.base.domain.name type: NodePort - service: Konnectivity servicePublishingStrategy: nodePort: address: api.hosted-ipv6.dns.base.domain.name type: NodePort - service: Ignition servicePublishingStrategy: nodePort: address: api.hosted-ipv6.dns.base.domain.name type: NodePort sshKey: name: sshkey-cluster-hosted-ipv6 status: controlPlaneEndpoint: host: \"\" port: 0",
"adm release info registry.dns.base.domain.name:5000/openshift-release-dev/ocp-release:4.x.y-x86_64 | grep hypershift",
"hypershift sha256:31149e3e5f8c5e5b5b100ff2d89975cf5f7a73801b2c06c639bf6648766117f8",
"pull registry.dns.base.domain.name:5000/openshift-release-dev/ocp-v4.0-art-dev@sha256:31149e3e5f8c5e5b5b100ff2d89975cf5f7a73801b2c06c639bf6648766117f8",
"pull registry.dns.base.domain.name:5000/openshift/release@sha256:31149e3e5f8c5e5b5b100ff2d89975cf5f7a73801b2c06c639bf6648766117f8 Trying to pull registry.dns.base.domain.name:5000/openshift/release@sha256:31149e3e5f8c5e5b5b100ff2d89975cf5f7a73801b2c06c639bf6648766117f8 Getting image source signatures Copying blob d8190195889e skipped: already exists Copying blob c71d2589fba7 skipped: already exists Copying blob d4dc6e74b6ce skipped: already exists Copying blob 97da74cc6d8f skipped: already exists Copying blob b70007a560c9 done Copying config 3a62961e6e done Writing manifest to image destination Storing signatures 3a62961e6ed6edab46d5ec8429ff1f41d6bb68de51271f037c6cb8941a007fde",
"apply -f 01-4.14-hosted_cluster-nodeport.yaml",
"NAME READY STATUS RESTARTS AGE capi-provider-5b57dbd6d5-pxlqc 1/1 Running 0 3m57s catalog-operator-9694884dd-m7zzv 2/2 Running 0 93s cluster-api-f98b9467c-9hfrq 1/1 Running 0 3m57s cluster-autoscaler-d7f95dd5-d8m5d 1/1 Running 0 93s cluster-image-registry-operator-5ff5944b4b-648ht 1/2 Running 0 93s cluster-network-operator-77b896ddc-wpkq8 1/1 Running 0 94s cluster-node-tuning-operator-84956cd484-4hfgf 1/1 Running 0 94s cluster-policy-controller-5fd8595d97-rhbwf 1/1 Running 0 95s cluster-storage-operator-54dcf584b5-xrnts 1/1 Running 0 93s cluster-version-operator-9c554b999-l22s7 1/1 Running 0 95s control-plane-operator-6fdc9c569-t7hr4 1/1 Running 0 3m57s csi-snapshot-controller-785c6dc77c-8ljmr 1/1 Running 0 77s csi-snapshot-controller-operator-7c6674bc5b-d9dtp 1/1 Running 0 93s csi-snapshot-webhook-5b8584875f-2492j 1/1 Running 0 77s dns-operator-6874b577f-9tc6b 1/1 Running 0 94s etcd-0 3/3 Running 0 3m39s hosted-cluster-config-operator-f5cf5c464-4nmbh 1/1 Running 0 93s ignition-server-6b689748fc-zdqzk 1/1 Running 0 95s ignition-server-proxy-54d4bb9b9b-6zkg7 1/1 Running 0 95s ingress-operator-6548dc758b-f9gtg 1/2 Running 0 94s konnectivity-agent-7767cdc6f5-tw782 1/1 Running 0 95s kube-apiserver-7b5799b6c8-9f5bp 4/4 Running 0 3m7s kube-controller-manager-5465bc4dd6-zpdlk 1/1 Running 0 44s kube-scheduler-5dd5f78b94-bbbck 1/1 Running 0 2m36s machine-approver-846c69f56-jxvfr 1/1 Running 0 92s oauth-openshift-79c7bf44bf-j975g 2/2 Running 0 62s olm-operator-767f9584c-4lcl2 2/2 Running 0 93s openshift-apiserver-5d469778c6-pl8tj 3/3 Running 0 2m36s openshift-controller-manager-6475fdff58-hl4f7 1/1 Running 0 95s openshift-oauth-apiserver-dbbc5cc5f-98574 2/2 Running 0 95s openshift-route-controller-manager-5f6997b48f-s9vdc 1/1 Running 0 95s packageserver-67c87d4d4f-kl7qh 2/2 Running 0 93s",
"NAMESPACE NAME VERSION KUBECONFIG PROGRESS AVAILABLE PROGRESSING MESSAGE clusters hosted-ipv6 hosted-admin-kubeconfig Partial True False The hosted control plane is available",
"apiVersion: hypershift.openshift.io/v1beta1 kind: NodePool metadata: creationTimestamp: null name: hosted-ipv6 namespace: clusters spec: arch: amd64 clusterName: hosted-ipv6 management: autoRepair: false 1 upgradeType: InPlace 2 nodeDrainTimeout: 0s platform: type: Agent release: image: registry.dns.base.domain.name:5000/openshift/release-images:4.x.y-x86_64 3 replicas: 0 status: replicas: 0 4",
"apply -f 02-nodepool.yaml",
"NAMESPACE NAME CLUSTER DESIRED NODES CURRENT NODES AUTOSCALING AUTOREPAIR VERSION UPDATINGVERSION UPDATINGCONFIG MESSAGE clusters hosted-ipv6 hosted 0 False False 4.x.y-x86_64",
"--- apiVersion: agent-install.openshift.io/v1beta1 kind: InfraEnv metadata: name: hosted-ipv6 namespace: clusters-hosted-ipv6 spec: pullSecretRef: 1 name: pull-secret sshAuthorizedKey: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDk7ICaUE+/k4zTpxLk4+xFdHi4ZuDi5qjeF52afsNkw0w/glILHhwpL5gnp5WkRuL8GwJuZ1VqLC9EKrdmegn4MrmUlq7WTsP0VFOZFBfq2XRUxo1wrRdor2z0Bbh93ytR+ZsDbbLlGngXaMa0Vbt+z74FqlcajbHTZ6zBmTpBVq5RHtDPgKITdpE1fongp7+ZXQNBlkaavaqv8bnyrP4BWahLP4iO9/xJF9lQYboYwEEDzmnKLMW1VtCE6nJzEgWCufACTbxpNS7GvKtoHT/OVzw8ArEXhZXQUS1UY8zKsX2iXwmyhw5Sj6YboA8WICs4z+TrFP89LmxXY0j6536TQFyRz1iB4WWvCbH5n6W+ABV2e8ssJB1AmEy8QYNwpJQJNpSxzoKBjI73XxvPYYC/IjPFMySwZqrSZCkJYqQ023ySkaQxWZT7in4KeMu7eS2tC+Kn4deJ7KwwUycx8n6RHMeD8Qg9flTHCv3gmab8JKZJqN3hW1D378JuvmIX4V0= 2",
"apply -f 03-infraenv.yaml",
"NAMESPACE NAME ISO CREATED AT clusters-hosted-ipv6 hosted 2023-09-11T15:14:10Z",
"kcli delete plan hosted-ipv6",
"kcli create vm -P start=False -P uefi_legacy=true -P plan=hosted-ipv6 -P memory=8192 -P numcpus=16 -P disks=[200,200] -P nets=[\"{\\\"name\\\": \\\"ipv6\\\", \\\"mac\\\": \\\"aa:aa:aa:aa:02:11\\\"}\"] -P uuid=aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaa0211 -P name=hosted-ipv6-worker0",
"kcli create vm -P start=False -P uefi_legacy=true -P plan=hosted-ipv6 -P memory=8192 -P numcpus=16 -P disks=[200,200] -P nets=[\"{\\\"name\\\": \\\"ipv6\\\", \\\"mac\\\": \\\"aa:aa:aa:aa:02:12\\\"}\"] -P uuid=aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaa0212 -P name=hosted-ipv6-worker1",
"kcli create vm -P start=False -P uefi_legacy=true -P plan=hosted-ipv6 -P memory=8192 -P numcpus=16 -P disks=[200,200] -P nets=[\"{\\\"name\\\": \\\"ipv6\\\", \\\"mac\\\": \\\"aa:aa:aa:aa:02:13\\\"}\"] -P uuid=aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaa0213 -P name=hosted-ipv6-worker2",
"systemctl restart ksushy",
"+---------------------+--------+-------------------+----------------------------------------------------+-------------+---------+ | Name | Status | Ip | Source | Plan | Profile | +---------------------+--------+-------------------+----------------------------------------------------+-------------+---------+ | hosted-worker0 | down | | | hosted-ipv6 | kvirt | | hosted-worker1 | down | | | hosted-ipv6 | kvirt | | hosted-worker2 | down | | | hosted-ipv6 | kvirt | +---------------------+--------+-------------------+----------------------------------------------------+-------------+---------+",
"--- apiVersion: v1 kind: Secret metadata: name: hosted-ipv6-worker0-bmc-secret namespace: clusters-hosted-ipv6 data: password: YWRtaW4= username: YWRtaW4= type: Opaque --- apiVersion: metal3.io/v1alpha1 kind: BareMetalHost metadata: name: hosted-ipv6-worker0 namespace: clusters-hosted-ipv6 labels: infraenvs.agent-install.openshift.io: hosted-ipv6 1 annotations: inspect.metal3.io: disabled bmac.agent-install.openshift.io/hostname: hosted-ipv6-worker0 2 spec: automatedCleaningMode: disabled 3 bmc: disableCertificateVerification: true 4 address: redfish-virtualmedia://[192.168.125.1]:9000/redfish/v1/Systems/local/hosted-ipv6-worker0 5 credentialsName: hosted-ipv6-worker0-bmc-secret 6 bootMACAddress: aa:aa:aa:aa:03:11 7 online: true 8",
"apply -f 04-bmh.yaml",
"NAMESPACE NAME STATE CONSUMER ONLINE ERROR AGE clusters-hosted hosted-worker0 registering true 2s clusters-hosted hosted-worker1 registering true 2s clusters-hosted hosted-worker2 registering true 2s",
"NAMESPACE NAME STATE CONSUMER ONLINE ERROR AGE clusters-hosted hosted-worker0 provisioning true 16s clusters-hosted hosted-worker1 provisioning true 16s clusters-hosted hosted-worker2 provisioning true 16s",
"NAMESPACE NAME STATE CONSUMER ONLINE ERROR AGE clusters-hosted hosted-worker0 provisioned true 67s clusters-hosted hosted-worker1 provisioned true 67s clusters-hosted hosted-worker2 provisioned true 67s",
"NAMESPACE NAME CLUSTER APPROVED ROLE STAGE clusters-hosted aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaa0411 true auto-assign clusters-hosted aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaa0412 true auto-assign clusters-hosted aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaa0413 true auto-assign",
"-n clusters scale nodepool hosted-ipv6 --replicas 3",
"NAMESPACE NAME CLUSTER APPROVED ROLE STAGE clusters-hosted aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaa0211 hosted true auto-assign clusters-hosted aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaa0212 hosted true auto-assign clusters-hosted aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaa0213 hosted true auto-assign",
"NAMESPACE NAME CLUSTER DESIRED NODES CURRENT NODES AUTOSCALING AUTOREPAIR VERSION UPDATINGVERSION UPDATINGCONFIG MESSAGE clusters hosted hosted 3 False False 4.x.y-x86_64 Minimum availability requires 3 replicas, current 0 available",
"export KUBECONFIG=/root/.kcli/clusters/hub-ipv4/auth/kubeconfig",
"watch \"oc get pod -n hypershift;echo;echo;oc get pod -n clusters-hosted-ipv4;echo;echo;oc get bmh -A;echo;echo;oc get agent -A;echo;echo;oc get infraenv -A;echo;echo;oc get hostedcluster -A;echo;echo;oc get nodepool -A;echo;echo;\"",
"get secret -n clusters-hosted-ipv4 admin-kubeconfig -o jsonpath='{.data.kubeconfig}' |base64 -d > /root/hc_admin_kubeconfig.yaml",
"export KUBECONFIG=/root/hc_admin_kubeconfig.yaml",
"watch \"oc get clusterversion,nodes,co\"",
"sudo dnf install dnsmasq radvd vim golang podman bind-utils net-tools httpd-tools tree htop strace tmux -y",
"systemctl enable --now podman",
"sudo yum -y install libvirt libvirt-daemon-driver-qemu qemu-kvm",
"sudo usermod -aG qemu,libvirt USD(id -un)",
"sudo newgrp libvirt",
"sudo systemctl enable --now libvirtd",
"sudo dnf -y copr enable karmab/kcli",
"sudo dnf -y install kcli",
"sudo kcli create pool -p /var/lib/libvirt/images default",
"kcli create host kvm -H 127.0.0.1 local",
"sudo setfacl -m u:USD(id -un):rwx /var/lib/libvirt/images",
"kcli create network -c 192.168.122.0/24 default",
"#!/bin/bash export IP=\"192.168.126.1\" 1 export BASE_RESOLV_CONF=\"/run/NetworkManager/resolv.conf\" if ! [[ `grep -q \"USDIP\" /etc/resolv.conf` ]]; then export TMP_FILE=USD(mktemp /etc/forcedns_resolv.conf.XXXXXX) cp USDBASE_RESOLV_CONF USDTMP_FILE chmod --reference=USDBASE_RESOLV_CONF USDTMP_FILE sed -i -e \"s/dns.base.domain.name//\" -e \"s/search /& dns.base.domain.name /\" -e \"0,/nameserver/s/nameserver/& USDIP\\n&/\" USDTMP_FILE 2 mv USDTMP_FILE /etc/resolv.conf fi echo \"ok\"",
"chmod 755 /etc/NetworkManager/dispatcher.d/forcedns",
"sudo dnf install python3-pyOpenSSL.noarch python3-cherrypy -y",
"kcli create sushy-service --ssl --ipv6 --port 9000",
"sudo systemctl daemon-reload",
"systemctl enable --now ksushy",
"systemctl status ksushy",
"sed -i s/^SELINUX=.*USD/SELINUX=permissive/ /etc/selinux/config; setenforce 0",
"systemctl disable --now firewalld",
"systemctl restart libvirtd",
"systemctl enable --now libvirtd",
"#!/usr/bin/env bash set -euo pipefail PRIMARY_NIC=USD(ls -1 /sys/class/net | grep -v podman | head -1) export PATH=/root/bin:USDPATH export PULL_SECRET=\"/root/baremetal/hub/openshift_pull.json\" 1 if [[ ! -f USDPULL_SECRET ]];then echo \"Pull Secret not found, exiting...\" exit 1 fi dnf -y install podman httpd httpd-tools jq skopeo libseccomp-devel export IP=USD(ip -o addr show USDPRIMARY_NIC | head -1 | awk '{print USD4}' | cut -d'/' -f1) REGISTRY_NAME=registry.USD(hostname --long) REGISTRY_USER=dummy REGISTRY_PASSWORD=dummy KEY=USD(echo -n USDREGISTRY_USER:USDREGISTRY_PASSWORD | base64) echo \"{\\\"auths\\\": {\\\"USDREGISTRY_NAME:5000\\\": {\\\"auth\\\": \\\"USDKEY\\\", \\\"email\\\": \\\"[email protected]\\\"}}}\" > /root/disconnected_pull.json mv USD{PULL_SECRET} /root/openshift_pull.json.old jq \".auths += {\\\"USDREGISTRY_NAME:5000\\\": {\\\"auth\\\": \\\"USDKEY\\\",\\\"email\\\": \\\"[email protected]\\\"}}\" < /root/openshift_pull.json.old > USDPULL_SECRET mkdir -p /opt/registry/{auth,certs,data,conf} cat <<EOF > /opt/registry/conf/config.yml version: 0.1 log: fields: service: registry storage: cache: blobdescriptor: inmemory filesystem: rootdirectory: /var/lib/registry delete: enabled: true http: addr: :5000 headers: X-Content-Type-Options: [nosniff] health: storagedriver: enabled: true interval: 10s threshold: 3 compatibility: schema1: enabled: true EOF openssl req -newkey rsa:4096 -nodes -sha256 -keyout /opt/registry/certs/domain.key -x509 -days 3650 -out /opt/registry/certs/domain.crt -subj \"/C=US/ST=Madrid/L=San Bernardo/O=Karmalabs/OU=Guitar/CN=USDREGISTRY_NAME\" -addext \"subjectAltName=DNS:USDREGISTRY_NAME\" cp /opt/registry/certs/domain.crt /etc/pki/ca-trust/source/anchors/ update-ca-trust extract htpasswd -bBc /opt/registry/auth/htpasswd USDREGISTRY_USER USDREGISTRY_PASSWORD create --name registry --net host --security-opt label=disable --replace -v /opt/registry/data:/var/lib/registry:z -v /opt/registry/auth:/auth:z -v /opt/registry/conf/config.yml:/etc/docker/registry/config.yml -e \"REGISTRY_AUTH=htpasswd\" -e \"REGISTRY_AUTH_HTPASSWD_REALM=Registry\" -e \"REGISTRY_HTTP_SECRET=ALongRandomSecretForRegistry\" -e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd -v /opt/registry/certs:/certs:z -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key docker.io/library/registry:latest [ \"USD?\" == \"0\" ] || !! systemctl enable --now registry",
"chmod u+x USD{HOME}/registry.sh",
"USD{HOME}/registry.sh",
"systemctl status",
"systemctl start",
"systemctl stop",
"kcli create network -c 192.168.126.0/24 -P dhcp=false -P dns=false -d 2620:52:0:1306::0/64 --domain dns.base.domain.name --nodhcp dual",
"kcli list network Listing Networks +---------+--------+---------------------+-------+------------------+------+ | Network | Type | Cidr | Dhcp | Domain | Mode | +---------+--------+---------------------+-------+------------------+------+ | default | routed | 192.168.122.0/24 | True | default | nat | | ipv4 | routed | 2620:52:0:1306::/64 | False | dns.base.domain.name | nat | | ipv4 | routed | 192.168.125.0/24 | False | dns.base.domain.name | nat | | ipv6 | routed | 2620:52:0:1305::/64 | False | dns.base.domain.name | nat | +---------+--------+---------------------+-------+------------------+------+",
"kcli info network ipv6 Providing information about network ipv6 cidr: 2620:52:0:1306::/64 dhcp: false domain: dns.base.domain.name mode: nat plan: kvirt type: routed",
"plan: hub-dual force: true version: stable tag: \"4.x.y-x86_64\" 1 cluster: \"hub-dual\" dualstack: true domain: dns.base.domain.name api_ip: 192.168.126.10 ingress_ip: 192.168.126.11 service_networks: - 172.30.0.0/16 - fd02::/112 cluster_networks: - 10.132.0.0/14 - fd01::/48 disconnected_url: registry.dns.base.domain.name:5000 disconnected_update: true disconnected_user: dummy disconnected_password: dummy disconnected_operators_version: v4.14 disconnected_operators: - name: metallb-operator - name: lvms-operator channels: - name: stable-4.14 disconnected_extra_images: - quay.io/user-name/trbsht:latest - quay.io/user-name/hypershift:BMSelfManage-v4.14-rc-v3 - registry.redhat.io/openshift4/ose-kube-rbac-proxy:v4.10 dualstack: true disk_size: 200 extra_disks: [200] memory: 48000 numcpus: 16 ctlplanes: 3 workers: 0 manifests: extra-manifests metal3: true network: dual users_dev: developer users_devpassword: developer users_admin: admin users_adminpassword: admin metallb_pool: dual-virtual-network metallb_ranges: - 192.168.126.150-192.168.126.190 metallb_autoassign: true apps: - users - lvms-operator - metallb-operator vmrules: - hub-bootstrap: nets: - name: ipv6 mac: aa:aa:aa:aa:10:07 - hub-ctlplane-0: nets: - name: ipv6 mac: aa:aa:aa:aa:10:01 - hub-ctlplane-1: nets: - name: ipv6 mac: aa:aa:aa:aa:10:02 - hub-ctlplane-2: nets: - name: ipv6 mac: aa:aa:aa:aa:10:03",
"kcli create cluster openshift --pf mgmt-compact-hub-dual.yaml",
"adm -a USD{LOCAL_SECRET_JSON} release extract --command=openshift-install \"USD{LOCAL_REGISTRY}/USD{LOCAL_REPOSITORY}:USD{OCP_RELEASE}-USD{ARCHITECTURE}\"",
"#!/bin/bash WEBSRV_FOLDER=/opt/srv ROOTFS_IMG_URL=\"USD(./openshift-install coreos print-stream-json | jq -r '.architectures.x86_64.artifacts.metal.formats.pxe.rootfs.location')\" 1 LIVE_ISO_URL=\"USD(./openshift-install coreos print-stream-json | jq -r '.architectures.x86_64.artifacts.metal.formats.iso.disk.location')\" 2 mkdir -p USD{WEBSRV_FOLDER}/images curl -Lk USD{ROOTFS_IMG_URL} -o USD{WEBSRV_FOLDER}/images/USD{ROOTFS_IMG_URL##*/} curl -Lk USD{LIVE_ISO_URL} -o USD{WEBSRV_FOLDER}/images/USD{LIVE_ISO_URL##*/} chmod -R 755 USD{WEBSRV_FOLDER}/* ## Run Webserver ps --noheading | grep -q websrv-ai if [[ USD? == 0 ]];then echo \"Launching Registry pod...\" /usr/bin/podman run --name websrv-ai --net host -v /opt/srv:/usr/local/apache2/htdocs:z quay.io/alosadag/httpd:p8080 fi",
"apiVersion: mirror.openshift.io/v1alpha2 kind: ImageSetConfiguration storageConfig: registry: imageURL: registry.dns.base.domain.name:5000/openshift/release/metadata:latest mirror: platform: channels: - name: candidate-4.14 minVersion: 4.x.y-x86_64 1 maxVersion: 4.x.y-x86_64 type: ocp graph: true additionalImages: - name: quay.io/karmab/origin-keepalived-ipfailover:latest - name: quay.io/karmab/kubectl:latest - name: quay.io/karmab/haproxy:latest - name: quay.io/karmab/mdns-publisher:latest - name: quay.io/karmab/origin-coredns:latest - name: quay.io/karmab/curl:latest - name: quay.io/karmab/kcli:latest - name: quay.io/user-name/trbsht:latest - name: quay.io/user-name/hypershift:BMSelfManage-v4.14-rc-v3 - name: registry.redhat.io/openshift4/ose-kube-rbac-proxy:v4.10 operators: - catalog: registry.redhat.io/redhat/redhat-operator-index:v4.14 packages: - name: lvms-operator - name: local-storage-operator - name: odf-csi-addons-operator - name: odf-operator - name: mcg-operator - name: ocs-operator - name: metallb-operator",
"oc-mirror --source-skip-tls --config imagesetconfig.yaml docker://USD{REGISTRY}",
"REGISTRY=registry.USD(hostname --long):5000 adm release mirror --from=registry.ci.openshift.org/ocp/release:4.x.y-x86_64 --to=USD{REGISTRY}/openshift/release --to-release-image=USD{REGISTRY}/openshift/release-images:registry.dns.base.domain.name:5000/openshift/release-images:4.x.y-x86_64",
"get packagemanifest",
"apply -f oc-mirror-workspace/results-XXXXXX/imageContentSourcePolicy.yaml",
"apply -f catalogSource-XXXXXXXX-index.yaml",
"--- apiVersion: v1 kind: ConfigMap metadata: name: custom-registries namespace: multicluster-engine labels: app: assisted-service data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- -----END CERTIFICATE----- registries.conf: | unqualified-search-registries = [\"registry.access.redhat.com\", \"docker.io\"] [[registry]] prefix = \"\" location = \"registry.redhat.io/openshift4\" mirror-by-digest-only = true [[registry.mirror]] location = \"registry.dns.base.domain.name:5000/openshift4\" 1 [[registry]] prefix = \"\" location = \"registry.redhat.io/rhacm2\" mirror-by-digest-only = true",
"--- apiVersion: agent-install.openshift.io/v1beta1 kind: AgentServiceConfig metadata: annotations: unsupported.agent-install.openshift.io/assisted-service-configmap: assisted-service-config 1 name: agent namespace: multicluster-engine spec: mirrorRegistryRef: name: custom-registries 2 databaseStorage: storageClassName: lvms-vg1 accessModes: - ReadWriteOnce resources: requests: storage: 10Gi filesystemStorage: storageClassName: lvms-vg1 accessModes: - ReadWriteOnce resources: requests: storage: 20Gi osImages: 3 - cpuArchitecture: x86_64 openshiftVersion: \"4.14\" rootFSUrl: http://registry.dns.base.domain.name:8080/images/rhcos-414.92.202308281054-0-live-rootfs.x86_64.img 4 url: http://registry.dns.base.domain.name:8080/images/rhcos-414.92.202308281054-0-live.x86_64.iso version: 414.92.202308281054-0",
"apply -f agentServiceConfig.yaml",
"assisted-image-service-0 1/1 Running 2 11d 1 assisted-service-668b49548-9m7xw 2/2 Running 5 11d 2",
"## REGISTRY_CERT_PATH=<PATH/TO/YOUR/CERTIFICATE/FILE> export REGISTRY_CERT_PATH=/opt/registry/certs/domain.crt create configmap user-ca-bundle -n openshift-config --from-file=ca-bundle.crt=USD{REGISTRY_CERT_PATH}",
"## REGISTRY_CERT_PATH=<PATH/TO/YOUR/CERTIFICATE/FILE> export REGISTRY_CERT_PATH=/opt/registry/certs/domain.crt export TMP_FILE=USD(mktemp) get cm -n openshift-config user-ca-bundle -ojsonpath='{.data.ca-bundle\\.crt}' > USD{TMP_FILE} echo >> USD{TMP_FILE} echo \\#registry.USD(hostname --long) >> USD{TMP_FILE} cat USD{REGISTRY_CERT_PATH} >> USD{TMP_FILE} create configmap user-ca-bundle -n openshift-config --from-file=ca-bundle.crt=USD{TMP_FILE} --dry-run=client -o yaml | kubectl apply -f -",
"--- apiVersion: v1 kind: Namespace metadata: creationTimestamp: null name: clusters-hosted-dual spec: {} status: {} --- apiVersion: v1 kind: Namespace metadata: creationTimestamp: null name: clusters spec: {} status: {}",
"--- apiVersion: v1 data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- -----END CERTIFICATE----- kind: ConfigMap metadata: name: user-ca-bundle namespace: clusters --- apiVersion: v1 data: .dockerconfigjson: xxxxxxxxx kind: Secret metadata: creationTimestamp: null name: hosted-dual-pull-secret namespace: clusters --- apiVersion: v1 kind: Secret metadata: name: sshkey-cluster-hosted-dual namespace: clusters stringData: id_rsa.pub: ssh-rsa xxxxxxxxx --- apiVersion: v1 data: key: nTPtVBEt03owkrKhIdmSW8jrWRxU57KO/fnZa8oaG0Y= kind: Secret metadata: creationTimestamp: null name: hosted-dual-etcd-encryption-key namespace: clusters type: Opaque",
"apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: creationTimestamp: null name: capi-provider-role namespace: clusters-hosted-dual rules: - apiGroups: - agent-install.openshift.io resources: - agents verbs: - '*'",
"apiVersion: hypershift.openshift.io/v1beta1 kind: HostedCluster metadata: name: hosted-dual namespace: clusters spec: additionalTrustBundle: name: \"user-ca-bundle\" olmCatalogPlacement: guest imageContentSources: 1 - source: quay.io/openshift-release-dev/ocp-v4.0-art-dev mirrors: - registry.dns.base.domain.name:5000/openshift/release 2 - source: quay.io/openshift-release-dev/ocp-release mirrors: - registry.dns.base.domain.name:5000/openshift/release-images - mirrors: autoscaling: {} controllerAvailabilityPolicy: SingleReplica dns: baseDomain: dns.base.domain.name etcd: managed: storage: persistentVolume: size: 8Gi restoreSnapshotURL: null type: PersistentVolume managementType: Managed fips: false networking: clusterNetwork: - cidr: 10.132.0.0/14 - cidr: fd01::/48 networkType: OVNKubernetes serviceNetwork: - cidr: 172.31.0.0/16 - cidr: fd02::/112 platform: agent: agentNamespace: clusters-hosted-dual type: Agent pullSecret: name: hosted-dual-pull-secret release: image: registry.dns.base.domain.name:5000/openshift/release-images:4.x.y-x86_64 3 secretEncryption: aescbc: activeKey: name: hosted-dual-etcd-encryption-key type: aescbc services: - service: APIServer servicePublishingStrategy: nodePort: address: api.hosted-dual.dns.base.domain.name type: NodePort - service: OAuthServer servicePublishingStrategy: nodePort: address: api.hosted-dual.dns.base.domain.name type: NodePort - service: OIDC servicePublishingStrategy: nodePort: address: api.hosted-dual.dns.base.domain.name type: NodePort - service: Konnectivity servicePublishingStrategy: nodePort: address: api.hosted-dual.dns.base.domain.name type: NodePort - service: Ignition servicePublishingStrategy: nodePort: address: api.hosted-dual.dns.base.domain.name type: NodePort sshKey: name: sshkey-cluster-hosted-dual status: controlPlaneEndpoint: host: \"\" port: 0",
"adm release info registry.dns.base.domain.name:5000/openshift-release-dev/ocp-release:4.x.y-x86_64 | grep hypershift",
"hypershift sha256:31149e3e5f8c5e5b5b100ff2d89975cf5f7a73801b2c06c639bf6648766117f8",
"pull registry.dns.base.domain.name:5000/openshift-release-dev/ocp-v4.0-art-dev@sha256:31149e3e5f8c5e5b5b100ff2d89975cf5f7a73801b2c06c639bf6648766117f8",
"pull registry.dns.base.domain.name:5000/openshift/release@sha256:31149e3e5f8c5e5b5b100ff2d89975cf5f7a73801b2c06c639bf6648766117f8 Trying to pull registry.dns.base.domain.name:5000/openshift/release@sha256:31149e3e5f8c5e5b5b100ff2d89975cf5f7a73801b2c06c639bf6648766117f8 Getting image source signatures Copying blob d8190195889e skipped: already exists Copying blob c71d2589fba7 skipped: already exists Copying blob d4dc6e74b6ce skipped: already exists Copying blob 97da74cc6d8f skipped: already exists Copying blob b70007a560c9 done Copying config 3a62961e6e done Writing manifest to image destination Storing signatures 3a62961e6ed6edab46d5ec8429ff1f41d6bb68de51271f037c6cb8941a007fde",
"apply -f 01-4.14-hosted_cluster-nodeport.yaml",
"NAME READY STATUS RESTARTS AGE capi-provider-5b57dbd6d5-pxlqc 1/1 Running 0 3m57s catalog-operator-9694884dd-m7zzv 2/2 Running 0 93s cluster-api-f98b9467c-9hfrq 1/1 Running 0 3m57s cluster-autoscaler-d7f95dd5-d8m5d 1/1 Running 0 93s cluster-image-registry-operator-5ff5944b4b-648ht 1/2 Running 0 93s cluster-network-operator-77b896ddc-wpkq8 1/1 Running 0 94s cluster-node-tuning-operator-84956cd484-4hfgf 1/1 Running 0 94s cluster-policy-controller-5fd8595d97-rhbwf 1/1 Running 0 95s cluster-storage-operator-54dcf584b5-xrnts 1/1 Running 0 93s cluster-version-operator-9c554b999-l22s7 1/1 Running 0 95s control-plane-operator-6fdc9c569-t7hr4 1/1 Running 0 3m57s csi-snapshot-controller-785c6dc77c-8ljmr 1/1 Running 0 77s csi-snapshot-controller-operator-7c6674bc5b-d9dtp 1/1 Running 0 93s csi-snapshot-webhook-5b8584875f-2492j 1/1 Running 0 77s dns-operator-6874b577f-9tc6b 1/1 Running 0 94s etcd-0 3/3 Running 0 3m39s hosted-cluster-config-operator-f5cf5c464-4nmbh 1/1 Running 0 93s ignition-server-6b689748fc-zdqzk 1/1 Running 0 95s ignition-server-proxy-54d4bb9b9b-6zkg7 1/1 Running 0 95s ingress-operator-6548dc758b-f9gtg 1/2 Running 0 94s konnectivity-agent-7767cdc6f5-tw782 1/1 Running 0 95s kube-apiserver-7b5799b6c8-9f5bp 4/4 Running 0 3m7s kube-controller-manager-5465bc4dd6-zpdlk 1/1 Running 0 44s kube-scheduler-5dd5f78b94-bbbck 1/1 Running 0 2m36s machine-approver-846c69f56-jxvfr 1/1 Running 0 92s oauth-openshift-79c7bf44bf-j975g 2/2 Running 0 62s olm-operator-767f9584c-4lcl2 2/2 Running 0 93s openshift-apiserver-5d469778c6-pl8tj 3/3 Running 0 2m36s openshift-controller-manager-6475fdff58-hl4f7 1/1 Running 0 95s openshift-oauth-apiserver-dbbc5cc5f-98574 2/2 Running 0 95s openshift-route-controller-manager-5f6997b48f-s9vdc 1/1 Running 0 95s packageserver-67c87d4d4f-kl7qh 2/2 Running 0 93s",
"NAMESPACE NAME VERSION KUBECONFIG PROGRESS AVAILABLE PROGRESSING MESSAGE clusters hosted-dual hosted-admin-kubeconfig Partial True False The hosted control plane is available",
"apiVersion: hypershift.openshift.io/v1beta1 kind: NodePool metadata: creationTimestamp: null name: hosted-dual namespace: clusters spec: arch: amd64 clusterName: hosted-dual management: autoRepair: false 1 upgradeType: InPlace 2 nodeDrainTimeout: 0s platform: type: Agent release: image: registry.dns.base.domain.name:5000/openshift/release-images:4.x.y-x86_64 3 replicas: 0 status: replicas: 0 4",
"apply -f 02-nodepool.yaml",
"NAMESPACE NAME CLUSTER DESIRED NODES CURRENT NODES AUTOSCALING AUTOREPAIR VERSION UPDATINGVERSION UPDATINGCONFIG MESSAGE clusters hosted-dual hosted 0 False False 4.x.y-x86_64",
"--- apiVersion: agent-install.openshift.io/v1beta1 kind: InfraEnv metadata: name: hosted-dual namespace: clusters-hosted-dual spec: pullSecretRef: 1 name: pull-secret sshAuthorizedKey: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDk7ICaUE+/k4zTpxLk4+xFdHi4ZuDi5qjeF52afsNkw0w/glILHhwpL5gnp5WkRuL8GwJuZ1VqLC9EKrdmegn4MrmUlq7WTsP0VFOZFBfq2XRUxo1wrRdor2z0Bbh93ytR+ZsDbbLlGngXaMa0Vbt+z74FqlcajbHTZ6zBmTpBVq5RHtDPgKITdpE1fongp7+ZXQNBlkaavaqv8bnyrP4BWahLP4iO9/xJF9lQYboYwEEDzmnKLMW1VtCE6nJzEgWCufACTbxpNS7GvKtoHT/OVzw8ArEXhZXQUS1UY8zKsX2iXwmyhw5Sj6YboA8WICs4z+TrFP89LmxXY0j6536TQFyRz1iB4WWvCbH5n6W+ABV2e8ssJB1AmEy8QYNwpJQJNpSxzoKBjI73XxvPYYC/IjPFMySwZqrSZCkJYqQ023ySkaQxWZT7in4KeMu7eS2tC+Kn4deJ7KwwUycx8n6RHMeD8Qg9flTHCv3gmab8JKZJqN3hW1D378JuvmIX4V0= 2",
"apply -f 03-infraenv.yaml",
"NAMESPACE NAME ISO CREATED AT clusters-hosted-dual hosted 2023-09-11T15:14:10Z",
"kcli delete plan hosted-dual",
"kcli create vm -P start=False -P uefi_legacy=true -P plan=hosted-dual -P memory=8192 -P numcpus=16 -P disks=[200,200] -P nets=[\"{\\\"name\\\": \\\"dual\\\", \\\"mac\\\": \\\"aa:aa:aa:aa:11:01\\\"}\"] -P uuid=aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaa1101 -P name=hosted-dual-worker0",
"kcli create vm -P start=False -P uefi_legacy=true -P plan=hosted-dual -P memory=8192 -P numcpus=16 -P disks=[200,200] -P nets=[\"{\\\"name\\\": \\\"dual\\\", \\\"mac\\\": \\\"aa:aa:aa:aa:11:02\\\"}\"] -P uuid=aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaa1102 -P name=hosted-dual-worker1",
"kcli create vm -P start=False -P uefi_legacy=true -P plan=hosted-dual -P memory=8192 -P numcpus=16 -P disks=[200,200] -P nets=[\"{\\\"name\\\": \\\"dual\\\", \\\"mac\\\": \\\"aa:aa:aa:aa:11:03\\\"}\"] -P uuid=aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaa1103 -P name=hosted-dual-worker2",
"systemctl restart ksushy",
"+---------------------+--------+-------------------+----------------------------------------------------+-------------+---------+ | Name | Status | Ip | Source | Plan | Profile | +---------------------+--------+-------------------+----------------------------------------------------+-------------+---------+ | hosted-worker0 | down | | | hosted-dual | kvirt | | hosted-worker1 | down | | | hosted-dual | kvirt | | hosted-worker2 | down | | | hosted-dual | kvirt | +---------------------+--------+-------------------+----------------------------------------------------+-------------+---------+",
"--- apiVersion: v1 kind: Secret metadata: name: hosted-dual-worker0-bmc-secret namespace: clusters-hosted-dual data: password: YWRtaW4= username: YWRtaW4= type: Opaque --- apiVersion: metal3.io/v1alpha1 kind: BareMetalHost metadata: name: hosted-dual-worker0 namespace: clusters-hosted-dual labels: infraenvs.agent-install.openshift.io: hosted-dual 1 annotations: inspect.metal3.io: disabled bmac.agent-install.openshift.io/hostname: hosted-dual-worker0 2 spec: automatedCleaningMode: disabled 3 bmc: disableCertificateVerification: true 4 address: redfish-virtualmedia://[192.168.126.1]:9000/redfish/v1/Systems/local/hosted-dual-worker0 5 credentialsName: hosted-dual-worker0-bmc-secret 6 bootMACAddress: aa:aa:aa:aa:02:11 7 online: true 8",
"apply -f 04-bmh.yaml",
"NAMESPACE NAME STATE CONSUMER ONLINE ERROR AGE clusters-hosted hosted-worker0 registering true 2s clusters-hosted hosted-worker1 registering true 2s clusters-hosted hosted-worker2 registering true 2s",
"NAMESPACE NAME STATE CONSUMER ONLINE ERROR AGE clusters-hosted hosted-worker0 provisioning true 16s clusters-hosted hosted-worker1 provisioning true 16s clusters-hosted hosted-worker2 provisioning true 16s",
"NAMESPACE NAME STATE CONSUMER ONLINE ERROR AGE clusters-hosted hosted-worker0 provisioned true 67s clusters-hosted hosted-worker1 provisioned true 67s clusters-hosted hosted-worker2 provisioned true 67s",
"NAMESPACE NAME CLUSTER APPROVED ROLE STAGE clusters-hosted aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaa0411 true auto-assign clusters-hosted aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaa0412 true auto-assign clusters-hosted aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaa0413 true auto-assign",
"-n clusters scale nodepool hosted-dual --replicas 3",
"NAMESPACE NAME CLUSTER APPROVED ROLE STAGE clusters-hosted aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaa0411 hosted true auto-assign clusters-hosted aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaa0412 hosted true auto-assign clusters-hosted aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaa0413 hosted true auto-assign",
"NAMESPACE NAME CLUSTER DESIRED NODES CURRENT NODES AUTOSCALING AUTOREPAIR VERSION UPDATINGVERSION UPDATINGCONFIG MESSAGE clusters hosted hosted 3 False False 4.x.y-x86_64 Minimum availability requires 3 replicas, current 0 available",
"export KUBECONFIG=/root/.kcli/clusters/hub-ipv4/auth/kubeconfig",
"watch \"oc get pod -n hypershift;echo;echo;oc get pod -n clusters-hosted-ipv4;echo;echo;oc get bmh -A;echo;echo;oc get agent -A;echo;echo;oc get infraenv -A;echo;echo;oc get hostedcluster -A;echo;echo;oc get nodepool -A;echo;echo;\"",
"get secret -n clusters-hosted-ipv4 admin-kubeconfig -o jsonpath='{.data.kubeconfig}' |base64 -d > /root/hc_admin_kubeconfig.yaml",
"export KUBECONFIG=/root/hc_admin_kubeconfig.yaml",
"watch \"oc get clusterversion,nodes,co\"",
"apiVersion: cluster.open-cluster-management.io/v1 kind: ManagedCluster metadata: annotations: import.open-cluster-management.io/hosting-cluster-name: local-cluster import.open-cluster-management.io/klusterlet-deploy-mode: Hosted open-cluster-management/created-via: hypershift labels: cloud: auto-detect cluster.open-cluster-management.io/clusterset: default name: <cluster_name> vendor: OpenShift name: <cluster_name> spec: hubAcceptsClient: true leaseDurationSeconds: 60",
"apply -f <file_name>",
"apiVersion: agent.open-cluster-management.io/v1 kind: KlusterletAddonConfig metadata: name: <cluster_name> namespace: <cluster_name> spec: clusterName: <cluster_name> clusterNamespace: <cluster_name> clusterLabels: cloud: auto-detect vendor: auto-detect applicationManager: enabled: true certPolicyController: enabled: true policyController: enabled: true searchCollector: enabled: false",
"apply -f <file_name>",
"get managedcluster <cluster_name>",
"edit addondeploymentconfig hypershift-addon-deploy-config -n multicluster-engine",
"apiVersion: addon.open-cluster-management.io/v1alpha1 kind: AddOnDeploymentConfig metadata: name: hypershift-addon-deploy-config namespace: multicluster-engine spec: customizedVariables: - name: hcMaxNumber value: \"80\" - name: hcThresholdNumber value: \"60\" - name: autoImportDisabled value: \"true\"",
"patch mce multiclusterengine --type=merge -p '{\"spec\":{\"overrides\":{\"components\":[{\"name\":\"hypershift\",\"enabled\": true}]}}}' 1",
"get mce multiclusterengine -o yaml 1",
"apiVersion: multicluster.openshift.io/v1 kind: MultiClusterEngine metadata: name: multiclusterengine spec: overrides: components: - name: hypershift enabled: true - name: hypershift-local-hosting enabled: true",
"apiVersion: addon.open-cluster-management.io/v1alpha1 kind: ManagedClusterAddOn metadata: name: hypershift-addon namespace: local-cluster spec: installNamespace: open-cluster-management-agent-addon",
"apply -f <filename>",
"get managedclusteraddons -n local-cluster hypershift-addon",
"NAME AVAILABLE DEGRADED PROGRESSING hypershift-addon True",
"get hostedcluster -A",
"patch mce multiclusterengine --type=merge -p '{\"spec\":{\"overrides\":{\"components\":[{\"name\":\"hypershift-local-hosting\",\"enabled\": false}]}}}' 1",
"patch mce multiclusterengine --type=merge -p '{\"spec\":{\"overrides\":{\"components\":[{\"name\":\"hypershift\",\"enabled\": false}]}}}' 1",
"get mce multiclusterengine -o yaml 1",
"apiVersion: multicluster.openshift.io/v1 kind: MultiClusterEngine metadata: name: multiclusterengine spec: overrides: components: - name: hypershift enabled: false - name: hypershift-local-hosting enabled: false",
"GET /cluster.open-cluster-management.io/v1/managedclusters",
"POST /cluster.open-cluster-management.io/v1/managedclusters",
"{ \"apiVersion\" : \"cluster.open-cluster-management.io/v1\", \"kind\" : \"ManagedCluster\", \"metadata\" : { \"labels\" : { \"vendor\" : \"OpenShift\" }, \"name\" : \"cluster1\" }, \"spec\": { \"hubAcceptsClient\": true, \"managedClusterClientConfigs\": [ { \"caBundle\": \"test\", \"url\": \"https://test.com\" } ] }, \"status\" : { } }",
"GET /cluster.open-cluster-management.io/v1/managedclusters/{cluster_name}",
"DELETE /cluster.open-cluster-management.io/v1/managedclusters/{cluster_name}",
"\"^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?USD\"",
"GET /cluster.open-cluster-management.io/v1beta2/managedclustersets",
"POST /cluster.open-cluster-management.io/v1beta2/managedclustersets",
"{ \"apiVersion\" : \"cluster.open-cluster-management.io/v1beta2\", \"kind\" : \"ManagedClusterSet\", \"metadata\" : { \"name\" : \"clusterset1\" }, \"spec\": { }, \"status\" : { } }",
"GET /cluster.open-cluster-management.io/v1beta2/managedclustersets/{clusterset_name}",
"DELETE /cluster.open-cluster-management.io/v1beta2/managedclustersets/{clusterset_name}",
"GET /cluster.open-cluster-management.io/v1beta2/namespaces/{namespace}/managedclustersetbindings",
"POST /cluster.open-cluster-management.io/v1beta2/namespaces/{namespace}/managedclustersetbindings",
"{ \"apiVersion\" : \"cluster.open-cluster-management.io/v1\", \"kind\" : \"ManagedClusterSetBinding\", \"metadata\" : { \"name\" : \"clusterset1\", \"namespace\" : \"ns1\" }, \"spec\": { \"clusterSet\": \"clusterset1\" }, \"status\" : { } }",
"GET /cluster.open-cluster-management.io/v1beta2/namespaces/{namespace}/managedclustersetbindings/{clustersetbinding_name}",
"DELETE /cluster.open-cluster-management.io/v1beta2/managedclustersetbindings/{clustersetbinding_name}",
"GET /managedclusters.clusterview.open-cluster-management.io",
"LIST /managedclusters.clusterview.open-cluster-management.io",
"{ \"apiVersion\" : \"clusterview.open-cluster-management.io/v1alpha1\", \"kind\" : \"ClusterView\", \"metadata\" : { \"name\" : \"<user_ID>\" }, \"spec\": { }, \"status\" : { } }",
"WATCH /managedclusters.clusterview.open-cluster-management.io",
"GET /managedclustersets.clusterview.open-cluster-management.io",
"LIST /managedclustersets.clusterview.open-cluster-management.io",
"WATCH /managedclustersets.clusterview.open-cluster-management.io",
"POST /authentication.open-cluster-management.io/v1beta1/managedserviceaccounts",
"apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.14.0 name: managedserviceaccounts.authentication.open-cluster-management.io spec: group: authentication.open-cluster-management.io names: kind: ManagedServiceAccount listKind: ManagedServiceAccountList plural: managedserviceaccounts singular: managedserviceaccount scope: Namespaced versions: - deprecated: true deprecationWarning: authentication.open-cluster-management.io/v1alpha1 ManagedServiceAccount is deprecated; use authentication.open-cluster-management.io/v1beta1 ManagedServiceAccount; version v1alpha1 will be removed in the next release name: v1alpha1 schema: openAPIV3Schema: description: ManagedServiceAccount is the Schema for the managedserviceaccounts API properties: apiVersion: description: |- APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: description: |- Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: description: ManagedServiceAccountSpec defines the desired state of ManagedServiceAccount properties: rotation: description: Rotation is the policy for rotation the credentials. properties: enabled: default: true description: |- Enabled prescribes whether the ServiceAccount token will be rotated from the upstream type: boolean validity: default: 8640h0m0s description: Validity is the duration for which the signed ServiceAccount token is valid. type: string type: object ttlSecondsAfterCreation: description: |- ttlSecondsAfterCreation limits the lifetime of a ManagedServiceAccount. If the ttlSecondsAfterCreation field is set, the ManagedServiceAccount will be automatically deleted regardless of the ManagedServiceAccount's status. When the ManagedServiceAccount is deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the ManagedServiceAccount won't be automatically deleted. If this field is set to zero, the ManagedServiceAccount becomes eligible for deletion immediately after its creation. In order to use ttlSecondsAfterCreation, the EphemeralIdentity feature gate must be enabled. exclusiveMinimum: true format: int32 minimum: 0 type: integer required: - rotation type: object status: description: ManagedServiceAccountStatus defines the observed state of ManagedServiceAccount properties: conditions: description: Conditions is the condition list. items: description: \"Condition contains details for one aspect of the current state of this API Resource.\\n---\\nThis struct is intended for direct use as an array at the field path .status.conditions. For example,\\n\\n\\n\\ttype FooStatus struct{\\n\\t // Represents the observations of a foo's current state.\\n\\t // Known .status.conditions.type are: \\\"Available\\\", \\\"Progressing\\\", and \\\"Degraded\\\"\\n\\t // +patchMergeKey=type\\n\\t // +patchStrategy=merge\\n\\t // +listType=map\\n\\t \\ // +listMapKey=type\\n\\t Conditions []metav1.Condition `json:\\\"conditions,omitempty\\\" patchStrategy:\\\"merge\\\" patchMergeKey:\\\"type\\\" protobuf:\\\"bytes,1,rep,name=conditions\\\"`\\n\\n\\n\\t \\ // other fields\\n\\t}\" properties: lastTransitionTime: description: |- lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: description: |- message is a human readable message indicating details about the transition. This may be an empty string. maxLength: 32768 type: string observedGeneration: description: |- observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: description: |- reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?USD type: string status: description: status of the condition, one of True, False, Unknown. enum: - \"True\" - \"False\" - Unknown type: string type: description: |- type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])USD type: string required: - lastTransitionTime - message - reason - status - type type: object type: array expirationTimestamp: description: ExpirationTimestamp is the time when the token will expire. format: date-time type: string tokenSecretRef: description: |- TokenSecretRef is a reference to the corresponding ServiceAccount's Secret, which stores the CA certficate and token from the managed cluster. properties: lastRefreshTimestamp: description: |- LastRefreshTimestamp is the timestamp indicating when the token in the Secret is refreshed. format: date-time type: string name: description: Name is the name of the referenced secret. type: string required: - lastRefreshTimestamp - name type: object type: object type: object served: true storage: false subresources: status: {} - name: v1beta1 schema: openAPIV3Schema: description: ManagedServiceAccount is the Schema for the managedserviceaccounts API properties: apiVersion: description: |- APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: description: |- Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: description: ManagedServiceAccountSpec defines the desired state of ManagedServiceAccount properties: rotation: description: Rotation is the policy for rotation the credentials. properties: enabled: default: true description: |- Enabled prescribes whether the ServiceAccount token will be rotated before it expires. Deprecated: All ServiceAccount tokens will be rotated before they expire regardless of this field. type: boolean validity: default: 8640h0m0s description: Validity is the duration of validity for requesting the signed ServiceAccount token. type: string type: object ttlSecondsAfterCreation: description: |- ttlSecondsAfterCreation limits the lifetime of a ManagedServiceAccount. If the ttlSecondsAfterCreation field is set, the ManagedServiceAccount will be automatically deleted regardless of the ManagedServiceAccount's status. When the ManagedServiceAccount is deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the ManagedServiceAccount won't be automatically deleted. If this field is set to zero, the ManagedServiceAccount becomes eligible for deletion immediately after its creation. In order to use ttlSecondsAfterCreation, the EphemeralIdentity feature gate must be enabled. exclusiveMinimum: true format: int32 minimum: 0 type: integer required: - rotation type: object status: description: ManagedServiceAccountStatus defines the observed state of ManagedServiceAccount properties: conditions: description: Conditions is the condition list. items: description: \"Condition contains details for one aspect of the current state of this API Resource.\\n---\\nThis struct is intended for direct use as an array at the field path .status.conditions. For example,\\n\\n\\n\\ttype FooStatus struct{\\n\\t // Represents the observations of a foo's current state.\\n\\t // Known .status.conditions.type are: \\\"Available\\\", \\\"Progressing\\\", and \\\"Degraded\\\"\\n\\t // +patchMergeKey=type\\n\\t // +patchStrategy=merge\\n\\t // +listType=map\\n\\t \\ // +listMapKey=type\\n\\t Conditions []metav1.Condition `json:\\\"conditions,omitempty\\\" patchStrategy:\\\"merge\\\" patchMergeKey:\\\"type\\\" protobuf:\\\"bytes,1,rep,name=conditions\\\"`\\n\\n\\n\\t \\ // other fields\\n\\t}\" properties: lastTransitionTime: description: |- lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: description: |- message is a human readable message indicating details about the transition. This may be an empty string. maxLength: 32768 type: string observedGeneration: description: |- observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: description: |- reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?USD type: string status: description: status of the condition, one of True, False, Unknown. enum: - \"True\" - \"False\" - Unknown type: string type: description: |- type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])USD type: string required: - lastTransitionTime - message - reason - status - type type: object type: array expirationTimestamp: description: ExpirationTimestamp is the time when the token will expire. format: date-time type: string tokenSecretRef: description: |- TokenSecretRef is a reference to the corresponding ServiceAccount's Secret, which stores the CA certficate and token from the managed cluster. properties: lastRefreshTimestamp: description: |- LastRefreshTimestamp is the timestamp indicating when the token in the Secret is refreshed. format: date-time type: string name: description: Name is the name of the referenced secret. type: string required: - lastRefreshTimestamp - name type: object type: object type: object served: true storage: true subresources: status: {}",
"GET /authentication.open-cluster-management.io/v1beta1/namespaces/{namespace}/managedserviceaccounts/{managedserviceaccount_name}",
"DELETE /authentication.open-cluster-management.io/v1beta1/namespaces/{namespace}/managedserviceaccounts/{managedserviceaccount_name}",
"POST /apis/multicluster.openshift.io/v1alpha1/multiclusterengines",
"{ \"apiVersion\": \"apiextensions.k8s.io/v1\", \"kind\": \"CustomResourceDefinition\", \"metadata\": { \"annotations\": { \"controller-gen.kubebuilder.io/version\": \"v0.4.1\" }, \"creationTimestamp\": null, \"name\": \"multiclusterengines.multicluster.openshift.io\" }, \"spec\": { \"group\": \"multicluster.openshift.io\", \"names\": { \"kind\": \"MultiClusterEngine\", \"listKind\": \"MultiClusterEngineList\", \"plural\": \"multiclusterengines\", \"shortNames\": [ \"mce\" ], \"singular\": \"multiclusterengine\" }, \"scope\": \"Cluster\", \"versions\": [ { \"additionalPrinterColumns\": [ { \"description\": \"The overall state of the MultiClusterEngine\", \"jsonPath\": \".status.phase\", \"name\": \"Status\", \"type\": \"string\" }, { \"jsonPath\": \".metadata.creationTimestamp\", \"name\": \"Age\", \"type\": \"date\" } ], \"name\": \"v1alpha1\", \"schema\": { \"openAPIV3Schema\": { \"description\": \"MultiClusterEngine is the Schema for the multiclusterengines\\nAPI\", \"properties\": { \"apiVersion\": { \"description\": \"APIVersion defines the versioned schema of this representation\\nof an object. Servers should convert recognized schemas to the latest\\ninternal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources\", \"type\": \"string\" }, \"kind\": { \"description\": \"Kind is a string value representing the REST resource this\\nobject represents. Servers may infer this from the endpoint the client\\nsubmits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\", \"type\": \"string\" }, \"metadata\": { \"type\": \"object\" }, \"spec\": { \"description\": \"MultiClusterEngineSpec defines the desired state of MultiClusterEngine\", \"properties\": { \"imagePullSecret\": { \"description\": \"Override pull secret for accessing MultiClusterEngine\\noperand and endpoint images\", \"type\": \"string\" }, \"nodeSelector\": { \"additionalProperties\": { \"type\": \"string\" }, \"description\": \"Set the nodeselectors\", \"type\": \"object\" }, \"targetNamespace\": { \"description\": \"Location where MCE resources will be placed\", \"type\": \"string\" }, \"tolerations\": { \"description\": \"Tolerations causes all components to tolerate any taints.\", \"items\": { \"description\": \"The pod this Toleration is attached to tolerates any\\ntaint that matches the triple <key,value,effect> using the matching\\noperator <operator>.\", \"properties\": { \"effect\": { \"description\": \"Effect indicates the taint effect to match. Empty\\nmeans match all taint effects. When specified, allowed values\\nare NoSchedule, PreferNoSchedule and NoExecute.\", \"type\": \"string\" }, \"key\": { \"description\": \"Key is the taint key that the toleration applies\\nto. Empty means match all taint keys. If the key is empty,\\noperator must be Exists; this combination means to match all\\nvalues and all keys.\", \"type\": \"string\" }, \"operator\": { \"description\": \"Operator represents a key's relationship to the\\nvalue. Valid operators are Exists and Equal. Defaults to Equal.\\nExists is equivalent to wildcard for value, so that a pod\\ncan tolerate all taints of a particular category.\", \"type\": \"string\" }, \"tolerationSeconds\": { \"description\": \"TolerationSeconds represents the period of time\\nthe toleration (which must be of effect NoExecute, otherwise\\nthis field is ignored) tolerates the taint. By default, it\\nis not set, which means tolerate the taint forever (do not\\nevict). Zero and negative values will be treated as 0 (evict\\nimmediately) by the system.\", \"format\": \"int64\", \"type\": \"integer\" }, \"value\": { \"description\": \"Value is the taint value the toleration matches\\nto. If the operator is Exists, the value should be empty,\\notherwise just a regular string.\", \"type\": \"string\" } }, \"type\": \"object\" }, \"type\": \"array\" } }, \"type\": \"object\" }, \"status\": { \"description\": \"MultiClusterEngineStatus defines the observed state of MultiClusterEngine\", \"properties\": { \"components\": { \"items\": { \"description\": \"ComponentCondition contains condition information for\\ntracked components\", \"properties\": { \"kind\": { \"description\": \"The resource kind this condition represents\", \"type\": \"string\" }, \"lastTransitionTime\": { \"description\": \"LastTransitionTime is the last time the condition\\nchanged from one status to another.\", \"format\": \"date-time\", \"type\": \"string\" }, \"message\": { \"description\": \"Message is a human-readable message indicating\\ndetails about the last status change.\", \"type\": \"string\" }, \"name\": { \"description\": \"The component name\", \"type\": \"string\" }, \"reason\": { \"description\": \"Reason is a (brief) reason for the condition's\\nlast status change.\", \"type\": \"string\" }, \"status\": { \"description\": \"Status is the status of the condition. One of True,\\nFalse, Unknown.\", \"type\": \"string\" }, \"type\": { \"description\": \"Type is the type of the cluster condition.\", \"type\": \"string\" } }, \"type\": \"object\" }, \"type\": \"array\" }, \"conditions\": { \"items\": { \"properties\": { \"lastTransitionTime\": { \"description\": \"LastTransitionTime is the last time the condition\\nchanged from one status to another.\", \"format\": \"date-time\", \"type\": \"string\" }, \"lastUpdateTime\": { \"description\": \"The last time this condition was updated.\", \"format\": \"date-time\", \"type\": \"string\" }, \"message\": { \"description\": \"Message is a human-readable message indicating\\ndetails about the last status change.\", \"type\": \"string\" }, \"reason\": { \"description\": \"Reason is a (brief) reason for the condition's\\nlast status change.\", \"type\": \"string\" }, \"status\": { \"description\": \"Status is the status of the condition. One of True,\\nFalse, Unknown.\", \"type\": \"string\" }, \"type\": { \"description\": \"Type is the type of the cluster condition.\", \"type\": \"string\" } }, \"type\": \"object\" }, \"type\": \"array\" }, \"phase\": { \"description\": \"Latest observed overall state\", \"type\": \"string\" } }, \"type\": \"object\" } }, \"type\": \"object\" } }, \"served\": true, \"storage\": true, \"subresources\": { \"status\": {} } } ] }, \"status\": { \"acceptedNames\": { \"kind\": \"\", \"plural\": \"\" }, \"conditions\": [], \"storedVersions\": [] } }",
"GET /apis/multicluster.openshift.io/v1alpha1/multiclusterengines",
"DELETE /apis/multicluster.openshift.io/v1alpha1/multiclusterengines/{name}",
"GET /cluster.open-cluster-management.io/v1beta1/namespaces/{namespace}/placements",
"POST /cluster.open-cluster-management.io/v1beta1/namespaces/{namespace}/placements",
"{ \"apiVersion\" : \"cluster.open-cluster-management.io/v1beta1\", \"kind\" : \"Placement\", \"metadata\" : { \"name\" : \"placement1\", \"namespace\": \"ns1\" }, \"spec\": { \"predicates\": [ { \"requiredClusterSelector\": { \"labelSelector\": { \"matchLabels\": { \"vendor\": \"OpenShift\" } } } } ] }, \"status\" : { } }",
"GET /cluster.open-cluster-management.io/v1beta1/namespaces/{namespace}/placements/{placement_name}",
"DELETE /cluster.open-cluster-management.io/v1beta1/namespaces/{namespace}/placements/{placement_name}",
"GET /cluster.open-cluster-management.io/v1beta1/namespaces/{namespace}/placementdecisions",
"POST /cluster.open-cluster-management.io/v1beta1/namespaces/{namespace}/placementdecisions",
"{ \"apiVersion\" : \"cluster.open-cluster-management.io/v1beta1\", \"kind\" : \"PlacementDecision\", \"metadata\" : { \"labels\" : { \"cluster.open-cluster-management.io/placement\" : \"placement1\" }, \"name\" : \"placement1-decision1\", \"namespace\": \"ns1\" }, \"status\" : { } }",
"GET /cluster.open-cluster-management.io/v1beta1/namespaces/{namespace}/placementdecisions/{placementdecision_name}",
"DELETE /cluster.open-cluster-management.io/v1beta1/namespaces/{namespace}/placementdecisions/{placementdecision_name}",
"<your-directory>/cluster-scoped-resources/gather-managed.log>",
"adm must-gather --image=registry.redhat.io/multicluster-engine/must-gather-rhel9:v2.6 --dest-dir=<directory>",
"REGISTRY=registry.example.com:5000 IMAGE=USDREGISTRY/multicluster-engine/must-gather-rhel9@sha256:ff9f37eb400dc1f7d07a9b6f2da9064992934b69847d17f59e385783c071b9d8> adm must-gather --image=USDIMAGE --dest-dir=./data",
"adm must-gather --image=registry.redhat.io/multicluster-engine/must-gather-rhel9:<v2.x> /usr/bin/gather hosted-cluster-namespace=HOSTEDCLUSTERNAMESPACE hosted-cluster-name=HOSTEDCLUSTERNAME",
"adm must-gather --image=registry.redhat.io/multicluster-engine/must-gather-rhel9:<v2.x> /usr/bin/gather hosted-cluster-namespace=HOSTEDCLUSTERNAMESPACE hosted-cluster-name=HOSTEDCLUSTERNAME --dest-dir=NAME ; tar -cvzf NAME.tgz NAME",
"REGISTRY=registry.example.com:5000 IMAGE=USDREGISTRY/multicluster-engine/must-gather-rhel9@sha256:ff9f37eb400dc1f7d07a9b6f2da9064992934b69847d17f59e385783c071b9d8 adm must-gather --image=USDIMAGE /usr/bin/gather hosted-cluster-namespace=HOSTEDCLUSTERNAMESPACE hosted-cluster-name=HOSTEDCLUSTERNAME --dest-dir=./data",
"This host is pending user action. Host timed out when pulling ignition. Check the host console... Rebooting",
"info: networking config is defined in the real root info: will not attempt to propagate initramfs networking",
"\"bmac.agent-install.openshift.io/installer-args\": \"[\\\"--append-karg\\\", \\\"coreos.force_persist_ip\\\"]\"",
"2024-02-22T09:56:19-05:00 ERROR HostedCluster deletion failed {\"namespace\": \"clusters\", \"name\": \"hosted-0\", \"error\": \"context deadline exceeded\"} 2024-02-22T09:56:19-05:00 ERROR Failed to destroy cluster {\"error\": \"context deadline exceeded\"}",
"get machine -n <hosted_cluster_namespace>",
"NAMESPACE NAME CLUSTER NODENAME PROVIDERID PHASE AGE VERSION clusters-hosted-0 hosted-0-9gg8b hosted-0-nhdbp Deleting 10h 4.15.0-rc.8",
"edit machines -n <hosted_cluster_namespace>",
"get agentmachine -n <hosted_cluster_namespace>",
"hcp destroy cluster agent --name <cluster_name>",
"reason: Unschedulable message: '0/6 nodes are available: 3 Insufficient cpu, 3 node(s) had taint {node-role.kubernetes.io/master: }, that the pod didn't tolerate.'",
"#!/bin/bash MCE_NAMESPACE=<namespace> delete multiclusterengine --all delete apiservice v1.admission.cluster.open-cluster-management.io v1.admission.work.open-cluster-management.io delete crd discoveredclusters.discovery.open-cluster-management.io discoveryconfigs.discovery.open-cluster-management.io delete mutatingwebhookconfiguration ocm-mutating-webhook managedclustermutators.admission.cluster.open-cluster-management.io delete validatingwebhookconfiguration ocm-validating-webhook delete ns USDMCE_NAMESPACE",
"-n multicluster-engine get pods -l app=managedcluster-import-controller-v2",
"-n multicluster-engine logs -l app=managedcluster-import-controller-v2 --tail=-1",
"-n <managed_cluster_name> get secrets <managed_cluster_name>-import",
"-n multicluster-engine logs -l app=managedcluster-import-controller-v2 --tail=-1 | grep importconfig-controller",
"get managedcluster <managed_cluster_name> -o=jsonpath='{range .status.conditions[*]}{.type}{\"\\t\"}{.status}{\"\\t\"}{.message}{\"\\n\"}{end}' | grep ManagedClusterImportSucceeded",
"-n multicluster-engine logs -l app=managedcluster-import-controller-v2 -f",
"cluster_name=<your-managed-cluster-name>",
"kubeconfig_secret_name=USD(oc -n USD{cluster_name} get clusterdeployments USD{cluster_name} -ojsonpath='{.spec.clusterMetadata.adminKubeconfigSecretRef.name}')",
"-n USD{cluster_name} get secret USD{kubeconfig_secret_name} -ojsonpath={.data.kubeconfig} | base64 -d > kubeconfig.old",
"export KUBECONFIG=kubeconfig.old",
"get ns",
"cluster_name=<managed_cluster_name> kubeconfig_file=<path_to_kubeconfig>",
"kubeconfig=USD(cat USD{kubeconfig_file} | base64 -w0)",
"kubeconfig=USD(cat USD{kubeconfig_file} | base64)",
"kubeconfig_patch=\"[\\{\\\"op\\\":\\\"replace\\\", \\\"path\\\":\\\"/data/kubeconfig\\\", \\\"value\\\":\\\"USD{kubeconfig}\\\"}, \\{\\\"op\\\":\\\"replace\\\", \\\"path\\\":\\\"/data/raw-kubeconfig\\\", \\\"value\\\":\\\"USD{kubeconfig}\\\"}]\"",
"kubeconfig_secret_name=USD(oc -n USD{cluster_name} get clusterdeployments USD{cluster_name} -ojsonpath='{.spec.clusterMetadata.adminKubeconfigSecretRef.name}')",
"-n USD{cluster_name} patch secrets USD{kubeconfig_secret_name} --type='json' -p=\"USD{kubeconfig_patch}\"",
"get pod -n open-cluster-management-agent | grep klusterlet-registration-agent",
"logs <registration_agent_pod> -n open-cluster-management-agent",
"get infrastructure cluster -o yaml | grep apiServerURL",
"E0917 03:04:05.874759 1 manifestwork_controller.go:179] Reconcile work test-1-klusterlet-addon-workmgr fails with err: Failed to update work status with err Get \"https://api.aaa-ocp.dev02.location.com:6443/apis/cluster.management.io/v1/namespaces/test-1/manifestworks/test-1-klusterlet-addon-workmgr\": x509: certificate signed by unknown authority E0917 03:04:05.874887 1 base_controller.go:231] \"ManifestWorkAgent\" controller failed to sync \"test-1-klusterlet-addon-workmgr\", err: Failed to update work status with err Get \"api.aaa-ocp.dev02.location.com:6443/apis/cluster.management.io/v1/namespaces/test-1/manifestworks/test-1-klusterlet-addon-workmgr\": x509: certificate signed by unknown authority E0917 03:04:37.245859 1 reflector.go:127] k8s.io/[email protected]/tools/cache/reflector.go:156: Failed to watch *v1.ManifestWork: failed to list *v1.ManifestWork: Get \"api.aaa-ocp.dev02.location.com:6443/apis/cluster.management.io/v1/namespaces/test-1/manifestworks?resourceVersion=607424\": x509: certificate signed by unknown authority",
"I0917 02:27:41.525026 1 event.go:282] Event(v1.ObjectReference{Kind:\"Namespace\", Namespace:\"open-cluster-management-agent\", Name:\"open-cluster-management-agent\", UID:\"\", APIVersion:\"v1\", ResourceVersion:\"\", FieldPath:\"\"}): type: 'Normal' reason: 'ManagedClusterAvailableConditionUpdated' update managed cluster \"test-1\" available condition to \"True\", due to \"Managed cluster is available\" E0917 02:58:26.315984 1 reflector.go:127] k8s.io/[email protected]/tools/cache/reflector.go:156: Failed to watch *v1beta1.CertificateSigningRequest: Get \"https://api.aaa-ocp.dev02.location.com:6443/apis/cluster.management.io/v1/managedclusters?allowWatchBookmarks=true&fieldSelector=metadata.name%3Dtest-1&resourceVersion=607408&timeout=9m33s&timeoutSeconds=573&watch=true\"\": x509: certificate signed by unknown authority E0917 02:58:26.598343 1 reflector.go:127] k8s.io/[email protected]/tools/cache/reflector.go:156: Failed to watch *v1.ManagedCluster: Get \"https://api.aaa-ocp.dev02.location.com:6443/apis/cluster.management.io/v1/managedclusters?allowWatchBookmarks=true&fieldSelector=metadata.name%3Dtest-1&resourceVersion=607408&timeout=9m33s&timeoutSeconds=573&watch=true\": x509: certificate signed by unknown authority E0917 02:58:27.613963 1 reflector.go:127] k8s.io/[email protected]/tools/cache/reflector.go:156: Failed to watch *v1.ManagedCluster: failed to list *v1.ManagedCluster: Get \"https://api.aaa-ocp.dev02.location.com:6443/apis/cluster.management.io/v1/managedclusters?allowWatchBookmarks=true&fieldSelector=metadata.name%3Dtest-1&resourceVersion=607408&timeout=9m33s&timeoutSeconds=573&watch=true\"\": x509: certificate signed by unknown authority",
"delete secret -n <cluster_name> <cluster_name>-import",
"delete secret -n <cluster_name> <cluster_name>-import",
"get secret -n <cluster_name> <cluster_name>-import -ojsonpath='{.data.import\\.yaml}' | base64 --decode > import.yaml",
"apply -f import.yaml",
"edit managedcluster <cluster-name>",
"time=\"2020-08-07T15:27:55Z\" level=error msg=\"Error: error setting up new vSphere SOAP client: Post https://147.1.1.1/sdk: x509: cannot validate certificate for xx.xx.xx.xx because it doesn't contain any IP SANs\" time=\"2020-08-07T15:27:55Z\" level=error",
"Error: error setting up new vSphere SOAP client: Post https://vspherehost.com/sdk: x509: certificate signed by unknown authority\"",
"x509: certificate has expired or is not yet valid",
"time=\"2020-08-07T19:41:58Z\" level=debug msg=\"vsphere_tag_category.category: Creating...\" time=\"2020-08-07T19:41:58Z\" level=error time=\"2020-08-07T19:41:58Z\" level=error msg=\"Error: could not create category: POST https://vspherehost.com/rest/com/vmware/cis/tagging/category: 403 Forbidden\" time=\"2020-08-07T19:41:58Z\" level=error time=\"2020-08-07T19:41:58Z\" level=error msg=\" on ../tmp/openshift-install-436877649/main.tf line 54, in resource \\\"vsphere_tag_category\\\" \\\"category\\\":\" time=\"2020-08-07T19:41:58Z\" level=error msg=\" 54: resource \\\"vsphere_tag_category\\\" \\\"category\\\" {\"",
"failed to fetch Master Machines: failed to load asset \\\\\\\"Install Config\\\\\\\": invalid \\\\\\\"install-config.yaml\\\\\\\" file: platform.vsphere.dnsVIP: Invalid value: \\\\\\\"\\\\\\\": \\\\\\\"\\\\\\\" is not a valid IP",
"time=\"2020-08-11T14:31:38-04:00\" level=debug msg=\"vsphereprivate_import_ova.import: Creating...\" time=\"2020-08-11T14:31:39-04:00\" level=error time=\"2020-08-11T14:31:39-04:00\" level=error msg=\"Error: rpc error: code = Unavailable desc = transport is closing\" time=\"2020-08-11T14:31:39-04:00\" level=error time=\"2020-08-11T14:31:39-04:00\" level=error time=\"2020-08-11T14:31:39-04:00\" level=fatal msg=\"failed to fetch Cluster: failed to generate asset \\\"Cluster\\\": failed to create cluster: failed to apply Terraform: failed to complete the change\"",
"ERROR ERROR Error: error reconfiguring virtual machine: error processing disk changes post-clone: disk.0: ServerFaultCode: NoPermission: RESOURCE (vm-71:2000), ACTION (queryAssociatedProfile): RESOURCE (vm-71), ACTION (PolicyIDByVirtualDisk)",
"get pod -n <new_cluster_name>",
"logs <new_cluster_name_provision_pod_name> -n <new_cluster_name> -c hive",
"describe clusterdeployments -n <new_cluster_name>",
"No subnets provided for zones",
"customresourcedefinition.apiextensions.k8s.io/klusterlets.operator.open-cluster-management.io configured clusterrole.rbac.authorization.k8s.io/klusterlet configured clusterrole.rbac.authorization.k8s.io/open-cluster-management:klusterlet-admin-aggregate-clusterrole configured clusterrolebinding.rbac.authorization.k8s.io/klusterlet configured namespace/open-cluster-management-agent configured secret/open-cluster-management-image-pull-credentials unchanged serviceaccount/klusterlet configured deployment.apps/klusterlet unchanged klusterlet.operator.open-cluster-management.io/klusterlet configured Error from server (BadRequest): error when creating \"STDIN\": Secret in version \"v1\" cannot be handled as a Secret: v1.Secret.ObjectMeta: v1.ObjectMeta.TypeMeta: Kind: Data: decode base64: illegal base64 data at input byte 1313, error found in #10 byte of ...|dhruy45=\"},\"kind\":\"|..., bigger context ...|tye56u56u568yuo7i67i67i67o556574i\"},\"kind\":\"Secret\",\"metadata\":{\"annotations\":{\"kube|",
"version",
"apply -f import.yaml",
"get klusterlets klusterlet -oyaml",
"api-resources --verbs=list --namespaced -o name | grep -E '^secrets|^serviceaccounts|^managedclusteraddons|^roles|^rolebindings|^manifestworks|^leases|^managedclusterinfo|^appliedmanifestworks'|^clusteroauths' | xargs -n 1 oc get --show-kind --ignore-not-found -n <cluster_name>",
"edit <resource_kind> <resource_name> -n <namespace>",
"delete ns <cluster-name>",
"delete secret auto-import-secret -n <cluster-namespace>",
"describe placement <placement-name>",
"Name: demo-placement Namespace: default Labels: <none> Annotations: <none> API Version: cluster.open-cluster-management.io/v1beta1 Kind: Placement Status: Conditions: Last Transition Time: 2022-09-30T07:39:45Z Message: Placement configurations check pass Reason: Succeedconfigured Status: False Type: PlacementMisconfigured Last Transition Time: 2022-09-30T07:39:45Z Message: No valid ManagedClusterSetBindings found in placement namespace Reason: NoManagedClusterSetBindings Status: False Type: PlacementSatisfied Number Of Selected Clusters: 0",
"Name: demo-placement Namespace: default Labels: <none> Annotations: <none> API Version: cluster.open-cluster-management.io/v1beta1 Kind: Placement Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal DecisionCreate 2m10s placementController Decision demo-placement-decision-1 is created with placement demo-placement in namespace default Normal DecisionUpdate 2m10s placementController Decision demo-placement-decision-1 is updated with placement demo-placement in namespace default Normal ScoreUpdate 2m10s placementController cluster1:0 cluster2:100 cluster3:200 Normal DecisionUpdate 3s placementController Decision demo-placement-decision-1 is updated with placement demo-placement in namespace default Normal ScoreUpdate 3s placementController cluster1:200 cluster2:145 cluster3:189 cluster4:200",
"ProvisioningError 51s metal3-baremetal-controller Image provisioning failed: Deploy step deploy.deploy failed with BadRequestError: HTTP POST https://<bmc_address>/redfish/v1/Managers/iDRAC.Embedded.1/VirtualMedia/CD/Actions/VirtualMedia.InsertMedia returned code 400. Base.1.8.GeneralError: A general error has occurred. See ExtendedInfo for more information Extended information: [ {\"Message\": \"Unable to mount remote share https://<ironic_address>/redfish/boot-<uuid>.iso.\", 'MessageArgs': [\"https://<ironic_address>/redfish/boot-<uuid>.iso\"], \"[email protected]\": 1, \"MessageId\": \"IDRAC.2.5.RAC0720\", \"RelatedProperties\": [\"#/Image\"], \"[email protected]\": 1, \"Resolution\": \"Retry the operation.\", \"Severity\": \"Informational\"} ]",
"get vm -n <namespace>",
"get pods -n <namespace> | grep \"import\"",
"get pods -n <namespace> -l kubevirt.io=virt-launcher",
"get nodes -o yaml",
"get pods -A --field-selector=status.phase!=Running,status,phase!=Succeeded",
"patch ingresscontroller -n openshift-ingress-operator default --type=json -p '[{ \"op\": \"add\", \"path\": \"/spec/routeAdmission\", \"value\": {wildcardPolicy: \"WildcardsAllowed\"}}]'",
"get pods -n <hosted-control-plane-namespace> -l app=cloud-controller-manager",
"get pods -n openshift-cluster-csi-drivers -o wide -l app=kubevirt-csi-driver",
"get pods -n <hcp namespace> -l app=kubevirt-csi-driver",
"get imagecontentsourcepolicy -o json | jq -r '.items[].spec.repositoryDigestMirrors[0].mirrors[0]'",
"get clusterversion version -ojsonpath='{.status.desired.image}'",
"image extract --file /release-manifests/0000_50_installer_coreos-bootimages.yaml <payload_image> --confirm",
"cat 0000_50_installer_coreos-bootimages.yaml | yq -r .data.stream | jq -r '.architectures.x86_64.images.kubevirt.\"digest-ref\"'",
"image mirror <rhcos_image> <internal_registry>",
"apiVersion: config.openshift.io/v1 kind: ImageDigestMirrorSet metadata: name: rhcos-boot-kubevirt spec: repositoryDigestMirrors: - mirrors: - <rhcos_image_no_digest> 1 source: virthost.ostest.test.metalkube.org:5000/localimages/ocp-v4.0-art-dev 2",
"apply -f rhcos-boot-kubevirt.yaml",
"E0809 18:45:29.450874 1 reflector.go:147] k8s.io/[email protected]/tools/cache/reflector.go:229: Failed to watch *v1.CertificateSigningRequest: failed to list *v1.CertificateSigningRequest: Get \"https://api.xxx.openshiftapps.com:443/apis/certificates.k8s.io/v1/certificatesigningrequests?limit=500&resourceVersion=0\": tls: failed to verify certificate: x509: certificate signed by unknown authority",
"curl -s https://letsencrypt.org/certs/isrgrootx1.pem | base64 | tr -d \"\\n\"",
"apiVersion: config.open-cluster-management.io/v1alpha1 kind: KlusterletConfig metadata: name: global spec: hubKubeAPIServerCABundle: \"<your_ca_certificate>\"",
"apply -f <filename>",
"get secret local-cluster-import -n local-cluster -o jsonpath={.data.import/.yaml} | base64 --decode > import.yaml",
"apply -f import.yaml",
"E0809 18:45:29.450874 1 reflector.go:147] k8s.io/[email protected]/tools/cache/reflector.go:229: Failed to watch *v1.CertificateSigningRequest: failed to list *v1.CertificateSigningRequest: Get \"https://api.xxx.openshiftapps.com:443/apis/certificates.k8s.io/v1/certificatesigningrequests?limit=500&resourceVersion=0\": tls: failed to verify certificate: x509: certificate signed by unknown authority",
"curl -s https://letsencrypt.org/certs/isrgrootx1.pem | base64 | tr -d \"\\n\"",
"apiVersion: config.open-cluster-management.io/v1alpha1 kind: KlusterletConfig metadata: name: global spec: hubKubeAPIServerCABundle: \"<your_ca_certificate>\"",
"apply -f <filename>",
"get secret <cluster_name>-import -n <cluster_name> -o jsonpath={.data.import/.yaml} | base64 --decode > <cluster_name>-import.yaml",
"apply -f <cluster_name>-import.yaml",
"get clusterversion version -o jsonpath='{.status.availableUpdates[*].version}'",
"-n <cluster_name> get managedclusterinfo <cluster_name> -o jsonpath='{.status.distributionInfo.ocp.availableUpdates[*]}'",
"-n <cluster_name> get ClusterCurator <cluster_name> -o yaml",
"-n <cluster_name> delete ClusterCurator <cluster_name>",
"-n open-cluster-management-agent-addon logs klusterlet-addon-workmgr-<your_pod_name>",
"logs -n multicluster-engine <assisted_service_pod_name> 1",
"error=\"failed to get pull secret for update: invalid pull secret data in secret pull-secret\"",
"pull secret must contain auth for \\\"registry.redhat.io\\\""
] | https://docs.redhat.com/en/documentation/red_hat_advanced_cluster_management_for_kubernetes/2.11/html/clusters/cluster_mce_overview |
B.41. libcgroup | B.41. libcgroup B.41.1. RHSA-2011:0320 - Important: libcgroup security update Updated libcgroup packages that fix two security issues are now available for Red Hat Enterprise Linux 6. The Red Hat Security Response Team has rated this update as having important security impact. Common Vulnerability Scoring System (CVSS) base score, which gives a detailed severity rating, is available for each vulnerability from the CVE link(s) associated with each description below. The libcgroup packages provide tools and libraries to control and monitor control groups. CVE-2011-1006 A heap-based buffer overflow flaw was found in the way libcgroup converted a list of user-provided controllers for a particular task into an array of strings. A local attacker could use this flaw to escalate their privileges via a specially-crafted list of controllers. CVE-2011-1022 It was discovered that libcgroup did not properly check the origin of Netlink messages. A local attacker could use this flaw to send crafted Netlink messages to the cgrulesengd daemon, causing it to put processes into one or more existing control groups, based on the attacker's choosing, possibly allowing the particular tasks to run with more resources (memory, CPU, etc.) than originally intended. Red Hat would like to thank Nelson Elhage for reporting the CVE-2011-1006 issue. All libcgroup users should upgrade to these updated packages, which contain backported patches to correct these issues. | null | https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/6.0_technical_notes/libcgroup |
Making open source more inclusive | Making open source more inclusive Red Hat is committed to replacing problematic language in our code, documentation, and web properties. We are beginning with these four terms: master, slave, blacklist, and whitelist. Because of the enormity of this endeavor, these changes will be implemented gradually over several upcoming releases. For more details, see our CTO Chris Wright's message . | null | https://docs.redhat.com/en/documentation/red_hat_build_of_eclipse_vert.x/4.3/html/eclipse_vert.x_4.3_migration_guide/making-open-source-more-inclusive |
probe::tty.init | probe::tty.init Name probe::tty.init - Called when a tty is being initalized Synopsis tty.init Values name the driver .dev_name name module the module name driver_name the driver name | null | https://docs.redhat.com/en/documentation/Red_Hat_Enterprise_Linux/7/html/systemtap_tapset_reference/api-tty-init |
1.4.2. Direct Routing | 1.4.2. Direct Routing Building an LVS setup that uses direct routing provides increased performance benefits compared to other LVS networking topologies. Direct routing allows the real servers to process and route packets directly to a requesting user rather than passing all outgoing packets through the LVS router. Direct routing reduces the possibility of network performance issues by relegating the job of the LVS router to processing incoming packets only. Figure 1.4. LVS Implemented with Direct Routing In the typical direct routing LVS setup, the LVS router receives incoming server requests through the virtual IP (VIP) and uses a scheduling algorithm to route the request to the real servers. The real server processes the request and sends the response directly to the client, bypassing the LVS routers. This method of routing allows for scalability in that real servers can be added without the added burden on the LVS router to route outgoing packets from the real server to the client, which can become a bottleneck under heavy network load. 1.4.2.1. Direct Routing and the ARP Limitation While there are many advantages to using direct routing in LVS, there are limitations as well. The most common issue with LVS via direct routing is with Address Resolution Protocol ( ARP ). In typical situations, a client on the Internet sends a request to an IP address. Network routers typically send requests to their destination by relating IP addresses to a machine's MAC address with ARP. ARP requests are broadcast to all connected machines on a network, and the machine with the correct IP/MAC address combination receives the packet. The IP/MAC associations are stored in an ARP cache, which is cleared periodically (usually every 15 minutes) and refilled with IP/MAC associations. The issue with ARP requests in a direct routing LVS setup is that because a client request to an IP address must be associated with a MAC address for the request to be handled, the virtual IP address of the LVS system must also be associated to a MAC as well. However, since both the LVS router and the real servers all have the same VIP, the ARP request will be broadcast ed to all the machines associated with the VIP. This can cause several problems, such as the VIP being associated directly to one of the real servers and processing requests directly, bypassing the LVS router completely and defeating the purpose of the LVS setup. To solve this issue, ensure that the incoming requests are always sent to the LVS router rather than one of the real servers. This can be done by using either the arptables_jf or the iptables packet filtering tool for the following reasons: The arptables_jf prevents ARP from associating VIPs with real servers. The iptables method completely sidesteps the ARP problem by not configuring VIPs on real servers in the first place. For more information on using arptables or iptables in a direct routing LVS environment, refer to Section 3.2.1, "Direct Routing and arptables_jf " or Section 3.2.2, "Direct Routing and iptables " . | null | https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/4/html/virtual_server_administration/s2-lvs-directrouting-vsa |
Network APIs | Network APIs OpenShift Container Platform 4.15 Reference guide for network APIs Red Hat OpenShift Documentation Team | [
"Name: \"mysvc\", Subsets: [ { Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}], Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}] }, { Addresses: [{\"ip\": \"10.10.3.3\"}], Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}] }, ]",
"Name: \"mysvc\", Subsets: [ { Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}], Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}] }, { Addresses: [{\"ip\": \"10.10.3.3\"}], Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}] }, ]",
"{ Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}], Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}] }",
"a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], b: [ 10.10.1.1:309, 10.10.2.2:309 ]"
] | https://docs.redhat.com/en/documentation/openshift_container_platform/4.15/html-single/network_apis/index |
Chapter 14. Diverting messages and splitting message flows | Chapter 14. Diverting messages and splitting message flows In AMQ Broker, you can configure objects called diverts that enable you to transparently divert messages from one address to another address, without changing any client application logic. You can also configure a divert to forward a copy of a message to a specified forwarding address, effectively splitting the message flow. 14.1. How message diverts work Diverts enable you to transparently divert messages routed to one address to some other address, without changing any client application logic. Think of the set of diverts on a broker server as a type of routing table for messages. A divert can be exclusive , meaning that a message is diverted to a specified forwarding address without going to its original address. A divert can also be non-exclusive , meaning that a message continues to go to its original address, while the broker sends a copy of the message to a specified forwarding address. Therefore, you can use non-exclusive diverts for splitting message flows. For example, you might split a message flow if you want to separately monitor every order sent to an order queue. When an address has both exclusive and non-exclusive diverts configured, the broker processes the exclusive diverts first. If a particular message has already been diverted by an exclusive divert, the broker does not process any non-exclusive diverts for that message. In this case, the message never goes to the original address. When a broker diverts a message, the broker assigns a new message ID and sets the message address to the new forwarding address. You can retrieve the original message ID and address values via the _AMQ_ORIG_ADDRESS (string type) and _AMQ_ORIG_MESSAGE_ID (long type) message properties. If you are using the Core API, use the Message.HDR_ORIGINAL_ADDRESS and Message.HDR_ORIG_MESSAGE_ID properties. Note You can divert a message only to an address on the same broker server. If you want to divert to an address on a different server, a common solution is to first divert the message to a local store-and-forward queue. Then, set up a bridge that consumes from that queue and forwards messages to an address on a different broker. Combining diverts with bridges enables you to create a distributed network of routing connections between geographically distributed broker servers. In this way, you can create a global messaging mesh. 14.2. Configuring message diverts To configure a divert in your broker instance, add a divert element within the core element of your broker.xml configuration file. <core> ... <divert name= > <address> </address> <forwarding-address> </forwarding-address> <filter string= > <routing-type> </routing-type> <exclusive> </exclusive> </divert> ... </core> divert Named instance of a divert. You can add multiple divert elements to your broker.xml configuration file, as long as each divert has a unique name. address Address from which to divert messages forwarding-address Address to which to forward messages filter Optional message filter. If you configure a filter, only messages that match the filter string are diverted. If you do not specify a filter, all messages are considered a match by the divert. routing-type Routing type of the diverted message. You can configure the divert to: Apply the anycast or multicast routing type to a message Strip (that is, remove) the existing routing type Pass through (that is, preserve) the existing routing type Control of the routing type is useful in situations where the message has its routing type already set, but you want to divert the message to an address that uses a different routing type. For example, the broker cannot route a message with the anycast routing type to a queue that uses multicast unless you set the routing-type parameter of the divert to MULTICAST . Valid values for the routing-type parameter of a divert are ANYCAST , MULTICAST , PASS , and STRIP . The default value is STRIP . exclusive Specify whether the divert is exclusive (set the property to true ) or non- exclusive (set the property to false ). The following subsections show configuration examples for exclusive and non-exclusive diverts. 14.2.1. Exclusive divert example Shown below is an example configuration for an exclusive divert. An exclusive divert diverts all matching messages from the originally-configured address to a new address. Matching messages do not get routed to the original address. <divert name="prices-divert"> <address>priceUpdates</address> <forwarding-address>priceForwarding</forwarding-address> <filter string="office='New York'"/> <exclusive>true</exclusive> </divert> In the preceding example, you define a divert called prices-divert that diverts any messages sent to the address priceUpdates to another local address, priceForwarding . You also specify a message filter string. Only messages with the message property office and the value New York are diverted. All other messages are routed to their original address. Finally, you specify that the divert is exclusive. 14.2.2. Non-exclusive divert example Shown below is an example configuration for a non-exclusive divert. In a non-exclusive divert, a message continues to go to its original address, while the broker also sends a copy of the message to a specified forwarding address. Therefore, a non-exclusive divert is a way to split a message flow. <divert name="order-divert"> <address>orders</address> <forwarding-address>spyTopic</forwarding-address> <exclusive>false</exclusive> </divert> In the preceding example, you define a divert called order-divert that takes a copy of every message sent to the address orders and sends it to a local address called spyTopic . You also specify that the divert is non-exclusive. Additional resources For a detailed example that uses both exclusive and non-exclusive diverts, and a bridge to forward messages to another broker, see Divert Example (external). | [
"<core> <divert name= > <address> </address> <forwarding-address> </forwarding-address> <filter string= > <routing-type> </routing-type> <exclusive> </exclusive> </divert> </core>",
"<divert name=\"prices-divert\"> <address>priceUpdates</address> <forwarding-address>priceForwarding</forwarding-address> <filter string=\"office='New York'\"/> <exclusive>true</exclusive> </divert>",
"<divert name=\"order-divert\"> <address>orders</address> <forwarding-address>spyTopic</forwarding-address> <exclusive>false</exclusive> </divert>"
] | https://docs.redhat.com/en/documentation/red_hat_amq/2020.q4/html/configuring_amq_broker/diverting-messages-configuring |
Validation and troubleshooting | Validation and troubleshooting OpenShift Container Platform 4.15 Validating and troubleshooting an OpenShift Container Platform installation Red Hat OpenShift Documentation Team | null | https://docs.redhat.com/en/documentation/openshift_container_platform/4.15/html/validation_and_troubleshooting/index |
Appendix A. Acceptor and Connector Configuration Parameters | Appendix A. Acceptor and Connector Configuration Parameters The tables below detail some of the available parameters used to configure Netty network connections. Parameters and their values are appended to the URI of the connection string. See Configuring acceptors and connectors in network connections for more information. Each table lists the parameters by name and notes whether they can be used with acceptors or connectors or with both. You can use some parameters, for example, only with acceptors. Note All Netty parameters are defined in the class org.apache.activemq.artemis.core.remoting.impl.netty.TransportConstants . Source code is available for download on the customer portal . Table A.1. Netty TCP Parameters Parameter Use with... Description batchDelay Both Before writing packets to the acceptor or connector, the broker can be configured to batch up writes for a maximum of batchDelay milliseconds. This can increase overall throughput for very small messages. It does so at the expense of an increase in average latency for message transfer. The default value is 0 ms. connectionsAllowed Acceptors Limits the number of connections that the acceptor will allow. When this limit is reached, a DEBUG-level message is issued to the log and the connection is refused. The type of client in use determines what happens when the connection is refused. directDeliver Both When a message arrives on the server and is delivered to waiting consumers, by default, the delivery is done on the same thread as that on which the message arrived. This gives good latency in environments with relatively small messages and a small number of consumers, but at the cost of overall throughput and scalability - especially on multi-core machines. If you want the lowest latency and a possible reduction in throughput then you can use the default value for directDeliver , which is true . If you are willing to take some small extra hit on latency but want the highest throughput set directDeliver to false . handshake-timeout Acceptors Prevents an unauthorized client to open a large number of connections and keep them open. Because each connection requires a file handle, it consumes resources that are then unavailable to other clients. This timeout limits the amount of time a connection can consume resources without having been authenticated. After the connection is authenticated, you can use resource limit settings to limit resource consumption. The default value is set to 10 seconds. You can set it to any other integer value. You can turn off this option by setting it to 0 or negative integer. After you edit the timeout value, you must restart the broker. localAddress Connectors Specifies which local address the client will use when connecting to the remote address. This is typically used in the Application Server or when running Embedded to control which address is used for outbound connections. If the local-address is not set then the connector will use any local address available. localPort Connectors Specifies which local port the client will use when connecting to the remote address. This is typically used in the Application Server or when running Embedded to control which port is used for outbound connections. If the default is used, which is 0, then the connector will let the system pick up an ephemeral port. Valid ports are 0 to 65535 nioRemotingThreads Both When configured to use NIO, the broker will by default use a number of threads equal to three times the number of cores (or hyper-threads) as reported by Runtime.getRuntime().availableProcessors() for processing incoming packets. If you want to override this value, you can set the number of threads by specifying this parameter. The default value for this parameter is -1 , which means use the value derived from Runtime.getRuntime().availableProcessors() * 3. tcpNoDelay Both If this is true then Nagle's algorithm will be disabled. This is a Java (client) socket option . The default value is true . tcpReceiveBufferSize Both Determines the size of the TCP receive buffer in bytes. The default value is 32768 . tcpSendBufferSize Both Determines the size of the TCP send buffer in bytes. The default value is 32768 . TCP buffer sizes should be tuned according to the bandwidth and latency of your network. In summary TCP send/receive buffer sizes should be calculated as: buffer_size = bandwidth * RTT. Where bandwidth is in bytes per second and network round trip time (RTT) is in seconds. RTT can be easily measured using the ping utility. For fast networks you may want to increase the buffer sizes from the defaults. Table A.2. Netty HTTP Parameters Parameter Use with... Description httpClientIdleTime Acceptors How long a client can be idle before sending an empty HTTP request to keep the connection alive. httpClientIdleScanPeriod Acceptors How often, in milliseconds, to scan for idle clients. httpEnabled Acceptors No longer required. With single port support the broker will now automatically detect if HTTP is being used and configure itself. httpRequiresSessionId Both If true the client will wait after the first call to receive a session id. Used when an HTTP connector is connecting to a servlet acceptor. This configuration is not recommended. httpResponseTime Acceptors How long the server can wait before sending an empty HTTP response to keep the connection alive. httpServerScanPeriod Acceptors How often, in milliseconds, to scan for clients needing responses. Table A.3. Netty TLS/SSL Parameters Parameter Use with... Description enabledCipherSuites Both Comma-separated list of cipher suites used for SSL communication. The default value is empty which means the JVM's default will be used. enabledProtocols Both Comma-separated list of protocols used for SSL communication. The default value is empty which means the JVM's default will be used. forceSSLParameters Connectors Controls whether any SSL settings that are set as parameters on the connector are used instead of JVM system properties (including both javax.net.ssl and AMQ Broker system properties) to configure the SSL context for this connector. Valid values are true or false . The default value is false . keyStorePassword Both When used on an acceptor this is the password for the server-side keystore. When used on a connector this is the password for the client-side keystore. This is only relevant for a connector if you are using two-way SSL (that is, mutual authentication). Although this value can be configured on the server, it is downloaded and used by the client. If the client needs to use a different password from that set on the server then it can override the server-side setting by either using the customary javax.net.ssl.keyStorePassword system property or the ActiveMQ-specific org.apache.activemq.ssl.keyStorePassword system property. The ActiveMQ-specific system property is useful if another component on client is already making use of the standard, Java system property. keyStorePath Both When used on an acceptor this is the path to the SSL key store on the server which holds the server's certificates (whether self-signed or signed by an authority). When used on a connector this is the path to the client-side SSL key store which holds the client certificates. This is only relevant for a connector if you are using two-way SSL (that is, mutual authentication). Although this value is configured on the server, it is downloaded and used by the client. If the client needs to use a different path from that set on the server then it can override the server-side setting by either using the customary javax.net.ssl.keyStore system property or the ActiveMQ-specific org.apache.activemq.ssl.keyStore system property. The ActiveMQ-specific system property is useful if another component on client is already making use of the standard, Java system property. needClientAuth Acceptors Tells a client connecting to this acceptor that two-way SSL is required. Valid values are true or false . The default value is false . sslEnabled Both Must be true to enable SSL. The default value is false . trustManagerFactoryPlugin Both Defines the name of the class that implements org.apache.activemq.artemis.api.core.TrustManagerFactoryPlugin . This is a simple interface with a single method that returns a javax.net.ssl.TrustManagerFactory . The TrustManagerFactory is used when the underlying javax.net.ssl.SSLContext is initialized. This allows fine-grained customization of who or what the broker and client trust. The value of trustManagerFactoryPlugin takes precedence over all other SSL parameters that apply to the trust manager (that is, trustAll , truststoreProvider , truststorePath , truststorePassword , and crlPath ). You need to place any specified plugin on the Java classpath of the broker. You can use the <broker_instance_dir> /lib directory, since it is part of the classpath by default. trustStorePassword Both When used on an acceptor this is the password for the server-side trust store. This is only relevant for an acceptor if you are using two-way SSL (that is, mutual authentication). When used on a connector this is the password for the client-side truststore. Although this value can be configured on the server, it is downloaded and used by the client. If the client needs to use a different password from that set on the server then it can override the server-side setting by either using the customary javax.net.ssl.trustStorePassword system property or the ActiveMQ-specific org.apache.activemq.ssl.trustStorePassword system property. The ActiveMQ-specific system property is useful if another component on client is already making use of the standard, Java system property. sniHost Both When used on an acceptor, sniHost is a regular expression used to match the server_name extension on incoming SSL connections (for more information about this extension, see https://tools.ietf.org/html/rfc6066 ). If the name doesn't match, then the connection to the acceptor is rejected. A WARN message is logged if this happens. If the incoming connection doesn't include the server_name extension, then the connection is accepted. When used on a connector, the sniHost value is used for the server_name extension on the SSL connection. sslProvider Both Used to change the SSL provider between JDK and OPENSSL . The default is JDK . If set to OPENSSL , you can add netty-tcnative to your classpath to use the natively-installed OpenSSL. This option can be useful if you want to use special ciphersuite-elliptic curve combinations that are supported through OpenSSL but not through the JDK provider. trustStorePath Both When used on an acceptor this is the path to the server-side SSL key store that holds the keys of all the clients that the server trusts. This is only relevant for an acceptor if you are using two-way SSL (that is, mutual authentication). When used on a connector this is the path to the client-side SSL key store which holds the public keys of all the servers that the client trusts. Although this value can be configured on the server, it is downloaded and used by the client. If the client needs to use a different path from that set on the server then it can override the server-side setting by either using the customary javax.net.ssl.trustStore system property or the ActiveMQ-specific org.apache.activemq.ssl.trustStore system property. The ActiveMQ-specific system property is useful if another component on client is already making use of the standard, Java system property. useDefaultSslContext Connector Allows the connector to use the "default" SSL context (via SSLContext.getDefault() ), which can be set programmatically by the client (via SSLContext.setDefault(SSLContext) ). If this parameter is set to true , all other SSL-related parameters except for sslEnabled are ignored. Valid values are true or false . The default value is false . verifyHost Both When used on a connector, the CN or Subject Alternative Name values of the server's SSL certificate are compared to the hostname being connected to in order to verify that they match. This is useful for both one-way and two-way SSL. When used on an acceptor, the CN or Subject Alternative Name values of the connecting client's SSL certificate are compared to its hostname to verify that they match. This is useful only for two-way SSL. Valid values are true or false . The default value is true for connectors and false for acceptors. wantClientAuth Acceptors Tells a client connecting to this acceptor that two-way SSL is requested, but not required. Valid values are true or false . The default value is false . If the property needClientAuth is set to true , then that property takes precedence and wantClientAuth is ignored. | null | https://docs.redhat.com/en/documentation/red_hat_amq/2021.q3/html/configuring_amq_broker/acceptor_connector_params |
Chapter 1. Introduction | Chapter 1. Introduction The Migration Planning Guide documents the migration of any minor version of a Red Hat Enterprise Linux 5 installation to Red Hat Enterprise Linux 6 by highlighting key behavioral changes worthy of note when migrating. This guide is intended to increase ease of use of Red Hat Enterprise Linux 6 by providing guidelines for changes in the product between Red Hat Enterprise Linux 5 and Red Hat Enterprise Linux 6. This guide is however not designed to explain all new features: it is focused on changes to the behavior of applications or components which were part of Red Hat Enterprise Linux 5 and have changed in Red Hat Enterprise Linux 6 or whose functionality has been superseded by another package. 1.1. Red Hat Enterprise Linux 6 Red Hat Enterprise Linux is the leading platform for open source computing. It is sold by subscription, delivers continuous value and is certified by top enterprise hardware and software vendors. From the desktop to the datacenter, Red Hat Enterprise Linux couples the innovation of open source technology and the stability of a true enterprise-class platform. Red Hat Enterprise Linux 6 is the generation of Red Hat's comprehensive suite of operating systems, designed for mission-critical enterprise computing. This release is available as a single kit on the following architectures: i386 AMD64/Intel64 System z IBM Power (64-bit) In this release, Red Hat brings together improvements across the server, desktop and the overall Red Hat open source experience. The following are some of the many improvements and new features that are included in this release: Power Management Tickless kernel and improvements through the application stack to reduce wakeups, power consumption measurement by PowerTOP, Power Management (ASPM, ALPM), and adaptive system tuning by Tuned. generation Networking Comprehensive IPv6 support (NFS 4, CIFS, mobile support [RFC 3775], ISATAP support), FCoE, iSCSI, and a new and improved mac80211 wireless stack. Reliability, Availability, and Serviceability System level enhancements from industry collaborations to make the most of hardware RAS capabilities and NUMA architectures. Fine-grained Control and Management Improved scheduler and better resource management in the kernel using Completely Fair Scheduler (CFS) and Control Groups (CG). Scalable Filesystems ext4 is the default filesystem, and xfs offers robustness, scalability, and high-performance. Virtualization KVM includes performance improvements and new features, sVirt protects the host, VMs, and data from a guest breach, SRIOV and NPIV deliver high performance virtual use of physical devices, and libvirt leverages kernel CG controller functionality. Enterprise Security Enhancement SELinux includes improved ease of use, application sandboxing, and significantly increased coverage of system services, while SSSD provides unified access to identity and authentication services as well as caching for off-line use. Development and Runtime Support SystemTap (allows instrumentation of a running kernel without recompilation), ABRT (simple collection of bug information), and improvements to GCC (version 4.4.3), glibc (version 2.11.1), and GDB (version 7.0.1). | null | https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/migration_planning_guide/chap-migration_guide-introduction |
function::sock_prot_num2str | function::sock_prot_num2str Name function::sock_prot_num2str - Given a protocol number, return a string representation Synopsis Arguments proto The protocol number | [
"sock_prot_num2str:string(proto:long)"
] | https://docs.redhat.com/en/documentation/Red_Hat_Enterprise_Linux/7/html/systemtap_tapset_reference/api-sock-prot-num2str |
7.112. libxcb | 7.112. libxcb 7.112.1. RHBA-2015:1358 - libxcb and libX11 bug fix update Updated libxcb and libX11 packages that fix several bugs are now available for Red Hat Enterprise Linux 6. The libxcb packages provide the X protocol C-language Binding (XCB) library. XCB is a replacement for Xlib featuring a small footprint, latency hiding, direct access to the protocol, improved threading support, and extensibility. The libX11 packages contain the core X11 protocol client library. Bug Fixes BZ# 667789 Previously, the "mute microphone" key in some cases did not work when using Red Hat Enterprise Linux 6. With this update, libX11 properly resolves the key symbol assigned to the "mute microphone" key by the xkeyboard-config keyboard layout files, and the "mute microphone" key now works as expected. BZ# 1206240 , BZ# 1046410 , BZ# 1164296 On 32-bit architectures, an X11 protocol client was under certain circumstances disconnected after processing a large number of X11 requests. With this update, the libxcb library exposes the request sequence number as a 64-bit integer so that libX11 can make use of 64-bit sequence number even on 32-bit systems. As a result, the described failure of the X11 client no longer occurs. Users of libxcb and libX11 are advised to upgrade to these updated packages, which fix these bugs. | null | https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/6.7_technical_notes/package-libxcb |
Chapter 19. Migrating a standalone Red Hat Quay deployment to a Red Hat Quay Operator deployment | Chapter 19. Migrating a standalone Red Hat Quay deployment to a Red Hat Quay Operator deployment The following procedures allow you to back up a standalone Red Hat Quay deployment and migrate it to the Red Hat Quay Operator on OpenShift Container Platform. 19.1. Backing up a standalone deployment of Red Hat Quay Procedure Back up the config.yaml of your standalone Red Hat Quay deployment: USD mkdir /tmp/quay-backup USD cp /path/to/Quay/config/directory/config.yaml /tmp/quay-backup Create a backup of the database that your standalone Red Hat Quay deployment is using: USD pg_dump -h DB_HOST -p 5432 -d QUAY_DATABASE_NAME -U QUAY_DATABASE_USER -W -O > /tmp/quay-backup/quay-database-backup.sql Install the AWS CLI if you do not have it already. Create an ~/.aws/ directory: USD mkdir ~/.aws/ Obtain the access_key and secret_key from the config.yaml of your standalone deployment: USD grep -i DISTRIBUTED_STORAGE_CONFIG -A10 /tmp/quay-backup/config.yaml Example output: DISTRIBUTED_STORAGE_CONFIG: minio-1: - RadosGWStorage - access_key: ########## bucket_name: quay hostname: 172.24.10.50 is_secure: false port: "9000" secret_key: ########## storage_path: /datastorage/registry Store the access_key and secret_key from the config.yaml file in your ~/.aws directory: USD touch ~/.aws/credentials Optional: Check that your access_key and secret_key are stored: USD cat > ~/.aws/credentials << EOF [default] aws_access_key_id = ACCESS_KEY_FROM_QUAY_CONFIG aws_secret_access_key = SECRET_KEY_FROM_QUAY_CONFIG EOF Example output: aws_access_key_id = ACCESS_KEY_FROM_QUAY_CONFIG aws_secret_access_key = SECRET_KEY_FROM_QUAY_CONFIG Note If the aws cli does not automatically collect the access_key and secret_key from the `~/.aws/credentials file , you can, you can configure these by running aws configure and manually inputting the credentials. In your quay-backup directory, create a bucket_backup directory: USD mkdir /tmp/quay-backup/bucket-backup Backup all blobs from the S3 storage: USD aws s3 sync --no-verify-ssl --endpoint-url https://PUBLIC_S3_ENDPOINT:PORT s3://QUAY_BUCKET/ /tmp/quay-backup/bucket-backup/ Note The PUBLIC_S3_ENDPOINT can be read from the Red Hat Quay config.yaml file under hostname in the DISTRIBUTED_STORAGE_CONFIG . If the endpoint is insecure, use http instead of https in the endpoint URL. Up to this point, you should have a complete backup of all Red Hat Quay data, blobs, the database, and the config.yaml file stored locally. In the following section, you will migrate the standalone deployment backup to Red Hat Quay on OpenShift Container Platform. 19.2. Using backed up standalone content to migrate to OpenShift Container Platform. Prerequisites Your standalone Red Hat Quay data, blobs, database, and config.yaml have been backed up. Red Hat Quay is deployed on OpenShift Container Platform using the Red Hat Quay Operator. A QuayRegistry with all components set to managed . Procedure The procedure in this documents uses the following namespace: quay-enterprise . Scale down the Red Hat Quay Operator: USD oc scale --replicas=0 deployment quay-operator.v3.6.2 -n openshift-operators Scale down the application and mirror deployments: USD oc scale --replicas=0 deployment QUAY_MAIN_APP_DEPLOYMENT QUAY_MIRROR_DEPLOYMENT Copy the database SQL backup to the Quay PostgreSQL database instance: USD oc cp /tmp/user/quay-backup/quay-database-backup.sql quay-enterprise/quayregistry-quay-database-54956cdd54-p7b2w:/var/lib/pgsql/data/userdata Obtain the database password from the Operator-created config.yaml file: USD oc get deployment quay-quay-app -o json | jq '.spec.template.spec.volumes[].projected.sources' | grep -i config-secret Example output: "name": "QUAY_CONFIG_SECRET_NAME" USD oc get secret quay-quay-config-secret-9t77hb84tb -o json | jq '.data."config.yaml"' | cut -d '"' -f2 | base64 -d -w0 > /tmp/quay-backup/operator-quay-config-yaml-backup.yaml cat /tmp/quay-backup/operator-quay-config-yaml-backup.yaml | grep -i DB_URI Example output: Execute a shell inside of the database pod: # oc exec -it quay-postgresql-database-pod -- /bin/bash Enter psql: bash-4.4USD psql Drop the database: postgres=# DROP DATABASE "example-restore-registry-quay-database"; Example output: Create a new database and set the owner as the same name: postgres=# CREATE DATABASE "example-restore-registry-quay-database" OWNER "example-restore-registry-quay-database"; Example output: Connect to the database: postgres=# \c "example-restore-registry-quay-database"; Example output: You are now connected to database "example-restore-registry-quay-database" as user "postgres". Create a pg_trmg extension of your Quay database: example-restore-registry-quay-database=# create extension pg_trgm ; Example output: CREATE EXTENSION Exit the postgres CLI to re-enter bash-4.4: \q Set the password for your PostgreSQL deployment: bash-4.4USD psql -h localhost -d "QUAY_DATABASE_NAME" -U QUAY_DATABASE_OWNER -W < /var/lib/pgsql/data/userdata/quay-database-backup.sql Example output: Exit bash mode: bash-4.4USD exit Create a new configuration bundle for the Red Hat Quay Operator. USD touch config-bundle.yaml In your new config-bundle.yaml , include all of the information that the registry requires, such as LDAP configuration, keys, and other modifications that your old registry had. Run the following command to move the secret_key to your config-bundle.yaml : USD cat /tmp/quay-backup/config.yaml | grep SECRET_KEY > /tmp/quay-backup/config-bundle.yaml Note You must manually copy all the LDAP, OIDC and other information and add it to the /tmp/quay-backup/config-bundle.yaml file. Create a configuration bundle secret inside of your OpenShift cluster: USD oc create secret generic new-custom-config-bundle --from-file=config.yaml=/tmp/quay-backup/config-bundle.yaml Scale up the Quay pods: Scale up the mirror pods: Patch the QuayRegistry CRD so that it contains the reference to the new custom configuration bundle: Note If Red Hat Quay returns a 500 internal server error, you might have to update the location of your DISTRIBUTED_STORAGE_CONFIG to default . Create a new AWS credentials.yaml in your /.aws/ directory and include the access_key and secret_key from the Operator-created config.yaml file: USD touch credentials.yaml USD grep -i DISTRIBUTED_STORAGE_CONFIG -A10 /tmp/quay-backup/operator-quay-config-yaml-backup.yaml USD cat > ~/.aws/credentials << EOF [default] aws_access_key_id = ACCESS_KEY_FROM_QUAY_CONFIG aws_secret_access_key = SECRET_KEY_FROM_QUAY_CONFIG EOF Note If the aws cli does not automatically collect the access_key and secret_key from the `~/.aws/credentials file , you can configure these by running aws configure and manually inputting the credentials. Record the NooBaa's publicly available endpoint: USD oc get route s3 -n openshift-storage -o yaml -o jsonpath="{.spec.host}{'\n'}" Sync the backup data to the NooBaa backend storage: USD aws s3 sync --no-verify-ssl --endpoint-url https://NOOBAA_PUBLIC_S3_ROUTE /tmp/quay-backup/bucket-backup/* s3://QUAY_DATASTORE_BUCKET_NAME Scale the Operator back up to 1 pod: USD oc scale -replicas=1 deployment quay-operator.v3.6.4 -n openshift-operators The Operator uses the custom configuration bundle provided and reconciles all secrets and deployments. Your new Red Hat Quay deployment on OpenShift Container Platform should contain all of the information that the old deployment had. You should be able to pull all images. | [
"mkdir /tmp/quay-backup cp /path/to/Quay/config/directory/config.yaml /tmp/quay-backup",
"pg_dump -h DB_HOST -p 5432 -d QUAY_DATABASE_NAME -U QUAY_DATABASE_USER -W -O > /tmp/quay-backup/quay-database-backup.sql",
"mkdir ~/.aws/",
"grep -i DISTRIBUTED_STORAGE_CONFIG -A10 /tmp/quay-backup/config.yaml",
"DISTRIBUTED_STORAGE_CONFIG: minio-1: - RadosGWStorage - access_key: ########## bucket_name: quay hostname: 172.24.10.50 is_secure: false port: \"9000\" secret_key: ########## storage_path: /datastorage/registry",
"touch ~/.aws/credentials",
"cat > ~/.aws/credentials << EOF [default] aws_access_key_id = ACCESS_KEY_FROM_QUAY_CONFIG aws_secret_access_key = SECRET_KEY_FROM_QUAY_CONFIG EOF",
"aws_access_key_id = ACCESS_KEY_FROM_QUAY_CONFIG aws_secret_access_key = SECRET_KEY_FROM_QUAY_CONFIG",
"mkdir /tmp/quay-backup/bucket-backup",
"aws s3 sync --no-verify-ssl --endpoint-url https://PUBLIC_S3_ENDPOINT:PORT s3://QUAY_BUCKET/ /tmp/quay-backup/bucket-backup/",
"oc scale --replicas=0 deployment quay-operator.v3.6.2 -n openshift-operators",
"oc scale --replicas=0 deployment QUAY_MAIN_APP_DEPLOYMENT QUAY_MIRROR_DEPLOYMENT",
"oc cp /tmp/user/quay-backup/quay-database-backup.sql quay-enterprise/quayregistry-quay-database-54956cdd54-p7b2w:/var/lib/pgsql/data/userdata",
"oc get deployment quay-quay-app -o json | jq '.spec.template.spec.volumes[].projected.sources' | grep -i config-secret",
"\"name\": \"QUAY_CONFIG_SECRET_NAME\"",
"oc get secret quay-quay-config-secret-9t77hb84tb -o json | jq '.data.\"config.yaml\"' | cut -d '\"' -f2 | base64 -d -w0 > /tmp/quay-backup/operator-quay-config-yaml-backup.yaml",
"cat /tmp/quay-backup/operator-quay-config-yaml-backup.yaml | grep -i DB_URI",
"postgresql://QUAY_DATABASE_OWNER:PASSWORD@DATABASE_HOST/QUAY_DATABASE_NAME",
"oc exec -it quay-postgresql-database-pod -- /bin/bash",
"bash-4.4USD psql",
"postgres=# DROP DATABASE \"example-restore-registry-quay-database\";",
"DROP DATABASE",
"postgres=# CREATE DATABASE \"example-restore-registry-quay-database\" OWNER \"example-restore-registry-quay-database\";",
"CREATE DATABASE",
"postgres=# \\c \"example-restore-registry-quay-database\";",
"You are now connected to database \"example-restore-registry-quay-database\" as user \"postgres\".",
"example-restore-registry-quay-database=# create extension pg_trgm ;",
"CREATE EXTENSION",
"\\q",
"bash-4.4USD psql -h localhost -d \"QUAY_DATABASE_NAME\" -U QUAY_DATABASE_OWNER -W < /var/lib/pgsql/data/userdata/quay-database-backup.sql",
"SET SET SET SET SET",
"bash-4.4USD exit",
"touch config-bundle.yaml",
"cat /tmp/quay-backup/config.yaml | grep SECRET_KEY > /tmp/quay-backup/config-bundle.yaml",
"oc create secret generic new-custom-config-bundle --from-file=config.yaml=/tmp/quay-backup/config-bundle.yaml",
"oc scale --replicas=1 deployment quayregistry-quay-app deployment.apps/quayregistry-quay-app scaled",
"oc scale --replicas=1 deployment quayregistry-quay-mirror deployment.apps/quayregistry-quay-mirror scaled",
"oc patch quayregistry QUAY_REGISTRY_NAME --type=merge -p '{\"spec\":{\"configBundleSecret\":\"new-custom-config-bundle\"}}'",
"touch credentials.yaml",
"grep -i DISTRIBUTED_STORAGE_CONFIG -A10 /tmp/quay-backup/operator-quay-config-yaml-backup.yaml",
"cat > ~/.aws/credentials << EOF [default] aws_access_key_id = ACCESS_KEY_FROM_QUAY_CONFIG aws_secret_access_key = SECRET_KEY_FROM_QUAY_CONFIG EOF",
"oc get route s3 -n openshift-storage -o yaml -o jsonpath=\"{.spec.host}{'\\n'}\"",
"aws s3 sync --no-verify-ssl --endpoint-url https://NOOBAA_PUBLIC_S3_ROUTE /tmp/quay-backup/bucket-backup/* s3://QUAY_DATASTORE_BUCKET_NAME",
"oc scale -replicas=1 deployment quay-operator.v3.6.4 -n openshift-operators"
] | https://docs.redhat.com/en/documentation/red_hat_quay/3.13/html/manage_red_hat_quay/migrating-standalone-quay-to-operator |
4.12.3. Disabling ACPI Completely in the grub.conf File | 4.12.3. Disabling ACPI Completely in the grub.conf File The preferred method of disabling ACPI Soft-Off is with chkconfig management ( Section 4.12.1, "Disabling ACPI Soft-Off with chkconfig Management" ). If the preferred method is not effective for your cluster, you can disable ACPI Soft-Off with the BIOS power management ( Section 4.12.2, "Disabling ACPI Soft-Off with the BIOS" ). If neither of those methods is effective for your cluster, you can disable ACPI completely by appending acpi=off to the kernel boot command line in the grub.conf file. Important This method completely disables ACPI; some computers do not boot correctly if ACPI is completely disabled. Use this method only if the other methods are not effective for your cluster. You can disable ACPI completely by editing the grub.conf file of each cluster node as follows: Open /boot/grub/grub.conf with a text editor. Append acpi=off to the kernel boot command line in /boot/grub/grub.conf (see Example 4.2, "Kernel Boot Command Line with acpi=off Appended to It" ). Reboot the node. When the cluster is configured and running, verify that the node turns off immediately when fenced. For information on testing a fence device, see How to test fence devices and fencing configuration in a RHEL 5, 6, or 7 High Availability cluster? . Example 4.2. Kernel Boot Command Line with acpi=off Appended to It In this example, acpi=off has been appended to the kernel boot command line - the line starting with "kernel /vmlinuz-2.6.32-193.el6.x86_64.img". | [
"grub.conf generated by anaconda # Note that you do not have to rerun grub after making changes to this file NOTICE: You have a /boot partition. This means that all kernel and initrd paths are relative to /boot/, eg. root (hd0,0) kernel /vmlinuz-version ro root=/dev/mapper/vg_doc01-lv_root initrd /initrd-[generic-]version.img #boot=/dev/hda default=0 timeout=5 serial --unit=0 --speed=115200 terminal --timeout=5 serial console title Red Hat Enterprise Linux Server (2.6.32-193.el6.x86_64) root (hd0,0) kernel /vmlinuz-2.6.32-193.el6.x86_64 ro root=/dev/mapper/vg_doc01-lv_root console=ttyS0,115200n8 acpi=off initrd /initramfs-2.6.32-131.0.15.el6.x86_64.img"
] | https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/configuring_the_red_hat_high_availability_add-on_with_pacemaker/s2-acpi-disable-boot-ca |
2.2. Performance | 2.2. Performance Older virtualization versions supported only a single CPU. As a result, virtual machines experienced noticeable performance limitations. This created a long-lasting misconception that virtualization solutions are slow. This is no longer the case. Modern virtualization technology has greatly improved the speed of virtual machines. Benchmarks show that virtual machines can run typical server applications nearly as efficiently as bare-metal systems: Red Hat Enterprise Linux 6.4 and KVM recorded an industry-leading TPC-C benchmark with an IBM DB2 database running in an entirely virtualized x86 environment and delivering 88% of bare-metal performance. Due to resource demands, databases have previously been reserved for bare-metal deployments only. The industry standard SAP Sales and Distribution (SD) Standard Application Benchmark found that Red Hat Enterprise Linux 6.2 and KVM performs at the virtualization efficiency of 85% when compared to a bare-metal system running on identical hardware. Red Hat Enterprise Linux 6.1 and KVM achieved record-setting virtualization performance in the SPECvirt_sc2010 benchmark recorded by the Standard Performance Evaluation Corporation (SPEC), setting the best virtual performance mark of any published SPECvirt result. The SPECvirt_sc2010 metric measures the end-to-end performance of system components in virtualized data center servers. Note For more information on performance tuning for virtualization, see the Red Hat Enterprise Linux 7 Virtualization Tuning and Optimization Guide . | null | https://docs.redhat.com/en/documentation/Red_Hat_Enterprise_Linux/7/html/virtualization_getting_started_guide/sec-virtualization_getting_started-advantages-performance |
Chapter 5. View OpenShift Data Foundation Topology | Chapter 5. View OpenShift Data Foundation Topology The topology shows the mapped visualization of the OpenShift Data Foundation storage cluster at various abstraction levels and also lets you to interact with these layers. The view also shows how the various elements compose the Storage cluster altogether. Procedure On the OpenShift Web Console, navigate to Storage Data Foundation Topology . The view shows the storage cluster and the zones inside it. You can see the nodes depicted by circular entities within the zones, which are indicated by dotted lines. The label of each item or resource contains basic information such as status and health or indication for alerts. Choose a node to view node details on the right-hand panel. You can also access resources or deployments within a node by clicking on the search/preview decorator icon. To view deployment details Click the preview decorator on a node. A modal window appears above the node that displays all of the deployments associated with that node along with their statuses. Click the Back to main view button in the model's upper left corner to close and return to the view. Select a specific deployment to see more information about it. All relevant data is shown in the side panel. Click the Resources tab to view the pods information. This tab provides a deeper understanding of the problems and offers granularity that aids in better troubleshooting. Click the pod links to view the pod information page on OpenShift Container Platform. The link opens in a new window. | null | https://docs.redhat.com/en/documentation/red_hat_openshift_data_foundation/4.17/html/deploying_openshift_data_foundation_using_ibm_power/viewing-odf-topology_mcg-verify |
Chapter 128. KafkaMirrorMakerSpec schema reference | Chapter 128. KafkaMirrorMakerSpec schema reference Used in: KafkaMirrorMaker Full list of KafkaMirrorMakerSpec schema properties Configures Kafka MirrorMaker. 128.1. include Use the include property to configure a list of topics that Kafka MirrorMaker mirrors from the source to the target Kafka cluster. The property allows any regular expression from the simplest case with a single topic name to complex patterns. For example, you can mirror topics A and B using A|B or all topics using * . You can also pass multiple regular expressions separated by commas to the Kafka MirrorMaker. 128.2. KafkaMirrorMakerConsumerSpec and KafkaMirrorMakerProducerSpec Use the KafkaMirrorMakerConsumerSpec and KafkaMirrorMakerProducerSpec to configure source (consumer) and target (producer) clusters. Kafka MirrorMaker always works together with two Kafka clusters (source and target). To establish a connection, the bootstrap servers for the source and the target Kafka clusters are specified as comma-separated lists of HOSTNAME:PORT pairs. Each comma-separated list contains one or more Kafka brokers or a Service pointing to Kafka brokers specified as a HOSTNAME:PORT pair. 128.3. logging Kafka MirrorMaker has its own configurable logger: mirrormaker.root.logger MirrorMaker uses the Apache log4j logger implementation. Use the logging property to configure loggers and logger levels. You can set the log levels by specifying the logger and level directly (inline) or use a custom (external) ConfigMap. If a ConfigMap is used, you set logging.valueFrom.configMapKeyRef.name property to the name of the ConfigMap containing the external logging configuration. Inside the ConfigMap, the logging configuration is described using log4j.properties . Both logging.valueFrom.configMapKeyRef.name and logging.valueFrom.configMapKeyRef.key properties are mandatory. A ConfigMap using the exact logging configuration specified is created with the custom resource when the Cluster Operator is running, then recreated after each reconciliation. If you do not specify a custom ConfigMap, default logging settings are used. If a specific logger value is not set, upper-level logger settings are inherited for that logger. For more information about log levels, see Apache logging services . Here we see examples of inline and external logging. The inline logging specifies the root logger level. You can also set log levels for specific classes or loggers by adding them to the loggers property. apiVersion: kafka.strimzi.io/v1beta2 kind: KafkaMirrorMaker spec: # ... logging: type: inline loggers: mirrormaker.root.logger: INFO log4j.logger.org.apache.kafka.clients.NetworkClient: TRACE log4j.logger.org.apache.kafka.common.network.Selector: DEBUG # ... Note Setting a log level to DEBUG may result in a large amount of log output and may have performance implications. apiVersion: kafka.strimzi.io/v1beta2 kind: KafkaMirrorMaker spec: # ... logging: type: external valueFrom: configMapKeyRef: name: customConfigMap key: mirror-maker-log4j.properties # ... Garbage collector (GC) Garbage collector logging can also be enabled (or disabled) using the jvmOptions property . 128.4. KafkaMirrorMakerSpec schema properties Property Property type Description version string The Kafka MirrorMaker version. Defaults to the latest version. Consult the documentation to understand the process required to upgrade or downgrade the version. replicas integer The number of pods in the Deployment . image string The container image used for Kafka MirrorMaker pods. If no image name is explicitly specified, it is determined based on the spec.version configuration. The image names are specifically mapped to corresponding versions in the Cluster Operator configuration. consumer KafkaMirrorMakerConsumerSpec Configuration of source cluster. producer KafkaMirrorMakerProducerSpec Configuration of target cluster. resources ResourceRequirements CPU and memory resources to reserve. whitelist string The whitelist property has been deprecated, and should now be configured using spec.include . List of topics which are included for mirroring. This option allows any regular expression using Java-style regular expressions. Mirroring two topics named A and B is achieved by using the expression A|B . Or, as a special case, you can mirror all topics using the regular expression * . You can also specify multiple regular expressions separated by commas. include string List of topics which are included for mirroring. This option allows any regular expression using Java-style regular expressions. Mirroring two topics named A and B is achieved by using the expression A|B . Or, as a special case, you can mirror all topics using the regular expression * . You can also specify multiple regular expressions separated by commas. jvmOptions JvmOptions JVM Options for pods. logging InlineLogging , ExternalLogging Logging configuration for MirrorMaker. metricsConfig JmxPrometheusExporterMetrics Metrics configuration. tracing JaegerTracing , OpenTelemetryTracing The configuration of tracing in Kafka MirrorMaker. template KafkaMirrorMakerTemplate Template to specify how Kafka MirrorMaker resources, Deployments and Pods , are generated. livenessProbe Probe Pod liveness checking. readinessProbe Probe Pod readiness checking. | [
"apiVersion: kafka.strimzi.io/v1beta2 kind: KafkaMirrorMaker spec: # logging: type: inline loggers: mirrormaker.root.logger: INFO log4j.logger.org.apache.kafka.clients.NetworkClient: TRACE log4j.logger.org.apache.kafka.common.network.Selector: DEBUG #",
"apiVersion: kafka.strimzi.io/v1beta2 kind: KafkaMirrorMaker spec: # logging: type: external valueFrom: configMapKeyRef: name: customConfigMap key: mirror-maker-log4j.properties #"
] | https://docs.redhat.com/en/documentation/red_hat_streams_for_apache_kafka/2.9/html/streams_for_apache_kafka_api_reference/type-KafkaMirrorMakerSpec-reference |
Chapter 2. Key Components | Chapter 2. Key Components A complete JBoss Data Virtualization solution consists of the following: The Server The server is positioned between business applications and one or more data sources. It coordinates integration of these data sources so they can be accessed by the business applications at runtime. Design Tools Various design tools are available to assist users in setting up JBoss Data Virtualization for a particular data integration solution. Administration Tools Various management tools are available for administrators to configure and monitor JBoss Data Virtualization. | null | https://docs.redhat.com/en/documentation/red_hat_jboss_data_virtualization/6.4/html/getting_started_guide/key_components |
6.10. Enabling and Disabling Cluster Resources | 6.10. Enabling and Disabling Cluster Resources The following command enables the resource specified by resource_id . The following command disables the resource specified by resource_id . | [
"pcs resource enable resource_id",
"pcs resource disable resource_id"
] | https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/7/html/high_availability_add-on_reference/s1-starting_stopping_resources-HAAR |
Chapter 3. Preparing for Red Hat Quay (high availability) | Chapter 3. Preparing for Red Hat Quay (high availability) Note This procedure presents guidance on how to set up a highly available, production-quality deployment of Red Hat Quay. 3.1. Prerequisites Here are a few things you need to know before you begin the Red Hat Quay high availability deployment: Either Postgres or MySQL can be used to provide the database service. Postgres was chosen here as the database because it includes the features needed to support Clair security scanning. Other options include: Crunchy Data PostgreSQL Operator: Although not supported directly by Red Hat, the CrunchDB Operator is available from Crunchy Data for use with Red Hat Quay. If you take this route, you should have a support contract with Crunchy Data and work directly with them for usage guidance or issues relating to the operator and their database. If your organization already has a high-availability (HA) database, you can use that database with Red Hat Quay. See the Red Hat Quay Support Policy for details on support for third-party databases and other components. Ceph Object Gateway (also called RADOS Gateway) is one example of a product that can provide the object storage needed by Red Hat Quay. If you want your Red Hat Quay setup to do geo-replication, Ceph Object Gateway or other supported object storage is required. For cloud installations, you can use any of the following cloud object storage: Amazon S3 (see S3 IAM Bucket Policy for details on configuring an S3 bucket policy for Quay) Azure Blob Storage Google Cloud Storage Ceph Object Gateway OpenStack Swift CloudFront + S3 NooBaa S3 Storage The haproxy server is used in this example, although you can use any proxy service that works for your environment. Number of systems: This procedure uses seven systems (physical or virtual) that are assigned with the following tasks: A: db01: Load balancer and database : Runs the haproxy load balancer and a Postgres database. Note that these components are not themselves highly available, but are used to indicate how you might set up your own load balancer or production database. B: quay01, quay02, quay03: Quay and Redis : Three (or more) systems are assigned to run the Quay and Redis services. C: ceph01, ceph02, ceph03, ceph04, ceph05: Ceph : Three (or more) systems provide the Ceph service, for storage. If you are deploying to a cloud, you can use the cloud storage features described earlier. This procedure employs an additional system for Ansible (ceph05) and one for a Ceph Object Gateway (ceph04). Each system should have the following attributes: Red Hat Enterprise Linux (RHEL) 8: Obtain the latest Red Hat Enterprise Linux 8 server media from the Downloads page and follow the installation instructions available in the Product Documentation for Red Hat Enterprise Linux 9 . Valid Red Hat Subscription : Configure a valid Red Hat Enterprise Linux 8 server subscription. CPUs : Two or more virtual CPUs RAM : 4GB for each A and B system; 8GB for each C system Disk space : About 20GB of disk space for each A and B system (10GB for the operating system and 10GB for docker storage). At least 30GB of disk space for C systems (or more depending on required container storage). 3.2. Using podman This document uses podman for creating and deploying containers. If you do not have podman available on your system, you should be able to use the equivalent docker commands. For more information on podman and related technologies, see Building, running, and managing Linux containers on Red Hat Enterprise Linux 8 . Note Podman is strongly recommended for highly available, production quality deployments of Red Hat Quay. Docker has not been tested with Red Hat Quay 3.13, and will be deprecated in a future release. 3.3. Setting up the HAProxy load balancer and the PostgreSQL database Use the following procedure to set up the HAProxy load balancer and the PostgreSQL database. Prerequisites You have installed the Podman or Docker CLI. Procedure On the first two systems, q01 and q02 , install the HAProxy load balancer and the PostgreSQL database. This configures HAProxy as the access point and load balancer for the following services running on other systems: Red Hat Quay (ports 80 and 443 on B systems) Redis (port 6379 on B systems) RADOS (port 7480 on C systems) Open all HAProxy ports in SELinux and selected HAProxy ports in the firewall: # setsebool -P haproxy_connect_any=on # firewall-cmd --permanent --zone=public --add-port=6379/tcp --add-port=7480/tcp success # firewall-cmd --reload success Configure the /etc/haproxy/haproxy.cfg to point to the systems and ports providing the Red Hat Quay, Redis and Ceph RADOS services. The following are examples of defaults and added frontend and backend settings: After the new haproxy.cfg file is in place, restart the HAProxy service by entering the following command: # systemctl restart haproxy Create a folder for the PostgreSQL database by entering the following command: USD mkdir -p /var/lib/pgsql/data Set the following permissions for the /var/lib/pgsql/data folder: USD chmod 777 /var/lib/pgsql/data Enter the following command to start the PostgreSQL database: USD sudo podman run -d --name postgresql_database \ -v /var/lib/pgsql/data:/var/lib/pgsql/data:Z \ -e POSTGRESQL_USER=quayuser -e POSTGRESQL_PASSWORD=quaypass \ -e POSTGRESQL_DATABASE=quaydb -p 5432:5432 \ registry.redhat.io/rhel8/postgresql-13:1-109 Note Data from the container will be stored on the host system in the /var/lib/pgsql/data directory. List the available extensions by entering the following command: USD sudo podman exec -it postgresql_database /bin/bash -c 'echo "SELECT * FROM pg_available_extensions" | /opt/rh/rh-postgresql96/root/usr/bin/psql' Example output name | default_version | installed_version | comment -----------+-----------------+-------------------+---------------------------------------- adminpack | 1.0 | | administrative functions for PostgreSQL ... Create the pg_trgm extension by entering the following command: USD sudo podman exec -it postgresql_database /bin/bash -c 'echo "CREATE EXTENSION IF NOT EXISTS pg_trgm;" | /opt/rh/rh-postgresql96/root/usr/bin/psql -d quaydb' Confirm that the pg_trgm has been created by entering the following command: USD sudo podman exec -it postgresql_database /bin/bash -c 'echo "SELECT * FROM pg_extension" | /opt/rh/rh-postgresql96/root/usr/bin/psql' Example output extname | extowner | extnamespace | extrelocatable | extversion | extconfig | extcondition ---------+----------+--------------+----------------+------------+-----------+-------------- plpgsql | 10 | 11 | f | 1.0 | | pg_trgm | 10 | 2200 | t | 1.3 | | (2 rows) Alter the privileges of the Postgres user quayuser and grant them the superuser role to give the user unrestricted access to the database: USD sudo podman exec -it postgresql_database /bin/bash -c 'echo "ALTER USER quayuser WITH SUPERUSER;" | /opt/rh/rh-postgresql96/root/usr/bin/psql' Example output ALTER ROLE If you have a firewalld service active on your system, run the following commands to make the PostgreSQL port available through the firewall: # firewall-cmd --permanent --zone=trusted --add-port=5432/tcp # firewall-cmd --reload Optional. If you do not have the postgres CLI package installed, install it by entering the following command: # yum install postgresql -y Use the psql command to test connectivity to the PostgreSQL database. Note To verify that you can access the service remotely, run the following command on a remote system. Example output Password for user test: psql (9.2.23, server 9.6.5) WARNING: psql version 9.2, server version 9.6. Some psql features might not work. Type "help" for help. test=> \q 3.4. Set Up Ceph For this Red Hat Quay configuration, we create a three-node Ceph cluster, with several other supporting nodes, as follows: ceph01, ceph02, and ceph03 - Ceph Monitor, Ceph Manager and Ceph OSD nodes ceph04 - Ceph RGW node ceph05 - Ceph Ansible administration node For details on installing Ceph nodes, see Installing Red Hat Ceph Storage on Red Hat Enterprise Linux . Once you have set up the Ceph storage cluster, create a Ceph Object Gateway (also referred to as a RADOS gateway). See Installing the Ceph Object Gateway for details. 3.4.1. Install each Ceph node On ceph01, ceph02, ceph03, ceph04, and ceph05, do the following: Review prerequisites for setting up Ceph nodes in Requirements for Installing Red Hat Ceph Storage . In particular: Decide if you want to use RAID controllers on OSD nodes . Decide if you want a separate cluster network for your Ceph Network Configuration . Prepare OSD storage (ceph01, ceph02, and ceph03 only). Set up the OSD storage on the three OSD nodes (ceph01, ceph02, and ceph03). See OSD Ansible Settings in Table 3.2 for details on supported storage types that you will enter into your Ansible configuration later. For this example, a single, unformatted block device ( /dev/sdb ), that is separate from the operating system, is configured on each of the OSD nodes. If you are installing on metal, you might want to add an extra hard drive to the machine for this purpose. Install Red Hat Enterprise Linux Server edition, as described in the RHEL 7 Installation Guide . Register and subscribe each Ceph node as described in the Registering Red Hat Ceph Storage Nodes . Here is how to subscribe to the necessary repos: Create an ansible user with root privilege on each node. Choose any name you like. For example: 3.4.2. Configure the Ceph Ansible node (ceph05) Log into the Ceph Ansible node (ceph05) and configure it as follows. You will need the ceph01, ceph02, and ceph03 nodes to be running to complete these steps. In the Ansible user's home directory create a directory to store temporary values created from the ceph-ansible playbook Enable password-less ssh for the ansible user. Run ssh-keygen on ceph05 (leave passphrase empty), then run and repeat ssh-copy-id to copy the public key to the Ansible user on ceph01, ceph02, and ceph03 systems: Install the ceph-ansible package: Create a symbolic between these two directories: Create copies of Ceph sample yml files to modify: Edit the copied group_vars/all.yml file. See General Ansible Settings in Table 3.1 for details. For example: Note that your network device and address range may differ. Edit the copied group_vars/osds.yml file. See the OSD Ansible Settings in Table 3.2 for details. In this example, the second disk device ( /dev/sdb ) on each OSD node is used for both data and journal storage: Edit the /etc/ansible/hosts inventory file to identify the Ceph nodes as Ceph monitor, OSD and manager nodes. In this example, the storage devices are identified on each node as well: Add this line to the /etc/ansible/ansible.cfg file, to save the output from each Ansible playbook run into your Ansible user's home directory: Check that Ansible can reach all the Ceph nodes you configured as your Ansible user: Run the ceph-ansible playbook (as your Ansible user): At this point, the Ansible playbook will check your Ceph nodes and configure them for the services you requested. If anything fails, make needed corrections and rerun the command. Log into one of the three Ceph nodes (ceph01, ceph02, or ceph03) and check the health of the Ceph cluster: On the same node, verify that monitoring is working using rados: 3.4.3. Install the Ceph Object Gateway On the Ansible system (ceph05), configure a Ceph Object Gateway to your Ceph Storage cluster (which will ultimately run on ceph04). See Installing the Ceph Object Gateway for details. 3.5. Set up Redis With Red Hat Enterprise Linux 8 server installed on each of the three Red Hat Quay systems (quay01, quay02, and quay03), install and start the Redis service as follows: Install / Deploy Redis : Run Redis as a container on each of the three quay0* systems: Check redis connectivity : You can use the telnet command to test connectivity to the redis service. Type MONITOR (to begin monitoring the service) and QUIT to exit: Note For more information on using podman and restarting containers, see the section "Using podman" earlier in this document. | [
"setsebool -P haproxy_connect_any=on firewall-cmd --permanent --zone=public --add-port=6379/tcp --add-port=7480/tcp success firewall-cmd --reload success",
"#--------------------------------------------------------------------- common defaults that all the 'listen' and 'backend' sections will use if not designated in their block #--------------------------------------------------------------------- defaults mode tcp log global option httplog option dontlognull option http-server-close option forwardfor except 127.0.0.0/8 option redispatch retries 3 timeout http-request 10s timeout queue 1m timeout connect 10s timeout client 1m timeout server 1m timeout http-keep-alive 10s timeout check 10s maxconn 3000 #--------------------------------------------------------------------- main frontend which proxys to the backends #--------------------------------------------------------------------- frontend fe_http *:80 default_backend be_http frontend fe_https *:443 default_backend be_https frontend fe_redis *:6379 default_backend be_redis frontend fe_rdgw *:7480 default_backend be_rdgw backend be_http balance roundrobin server quay01 quay01:80 check server quay02 quay02:80 check server quay03 quay03:80 check backend be_https balance roundrobin server quay01 quay01:443 check server quay02 quay02:443 check server quay03 quay03:443 check backend be_rdgw balance roundrobin server ceph01 ceph01:7480 check server ceph02 ceph02:7480 check server ceph03 ceph03:7480 check backend be_redis server quay01 quay01:6379 check inter 1s server quay02 quay02:6379 check inter 1s server quay03 quay03:6379 check inter 1s",
"systemctl restart haproxy",
"mkdir -p /var/lib/pgsql/data",
"chmod 777 /var/lib/pgsql/data",
"sudo podman run -d --name postgresql_database -v /var/lib/pgsql/data:/var/lib/pgsql/data:Z -e POSTGRESQL_USER=quayuser -e POSTGRESQL_PASSWORD=quaypass -e POSTGRESQL_DATABASE=quaydb -p 5432:5432 registry.redhat.io/rhel8/postgresql-13:1-109",
"sudo podman exec -it postgresql_database /bin/bash -c 'echo \"SELECT * FROM pg_available_extensions\" | /opt/rh/rh-postgresql96/root/usr/bin/psql'",
"name | default_version | installed_version | comment -----------+-----------------+-------------------+---------------------------------------- adminpack | 1.0 | | administrative functions for PostgreSQL",
"sudo podman exec -it postgresql_database /bin/bash -c 'echo \"CREATE EXTENSION IF NOT EXISTS pg_trgm;\" | /opt/rh/rh-postgresql96/root/usr/bin/psql -d quaydb'",
"sudo podman exec -it postgresql_database /bin/bash -c 'echo \"SELECT * FROM pg_extension\" | /opt/rh/rh-postgresql96/root/usr/bin/psql'",
"extname | extowner | extnamespace | extrelocatable | extversion | extconfig | extcondition ---------+----------+--------------+----------------+------------+-----------+-------------- plpgsql | 10 | 11 | f | 1.0 | | pg_trgm | 10 | 2200 | t | 1.3 | | (2 rows)",
"sudo podman exec -it postgresql_database /bin/bash -c 'echo \"ALTER USER quayuser WITH SUPERUSER;\" | /opt/rh/rh-postgresql96/root/usr/bin/psql'",
"ALTER ROLE",
"firewall-cmd --permanent --zone=trusted --add-port=5432/tcp",
"firewall-cmd --reload",
"yum install postgresql -y",
"psql -h localhost quaydb quayuser",
"Password for user test: psql (9.2.23, server 9.6.5) WARNING: psql version 9.2, server version 9.6. Some psql features might not work. Type \"help\" for help. test=> \\q",
"subscription-manager repos --disable=* subscription-manager repos --enable=rhel-7-server-rpms subscription-manager repos --enable=rhel-7-server-extras-rpms subscription-manager repos --enable=rhel-7-server-rhceph-3-mon-rpms subscription-manager repos --enable=rhel-7-server-rhceph-3-osd-rpms subscription-manager repos --enable=rhel-7-server-rhceph-3-tools-rpms",
"USER_NAME=ansibleadmin useradd USDUSER_NAME -c \"Ansible administrator\" passwd USDUSER_NAME New password: ********* Retype new password: ********* cat << EOF >/etc/sudoers.d/admin admin ALL = (root) NOPASSWD:ALL EOF chmod 0440 /etc/sudoers.d/USDUSER_NAME",
"USER_NAME=ansibleadmin sudo su - USDUSER_NAME [ansibleadmin@ceph05 ~]USD mkdir ~/ceph-ansible-keys",
"USER_NAME=ansibleadmin sudo su - USDUSER_NAME [ansibleadmin@ceph05 ~]USD ssh-keygen [ansibleadmin@ceph05 ~]USD ssh-copy-id USDUSER_NAME@ceph01 [ansibleadmin@ceph05 ~]USD ssh-copy-id USDUSER_NAME@ceph02 [ansibleadmin@ceph05 ~]USD ssh-copy-id USDUSER_NAME@ceph03 [ansibleadmin@ceph05 ~]USD exit #",
"yum install ceph-ansible",
"ln -s /usr/share/ceph-ansible/group_vars /etc/ansible/group_vars",
"cd /usr/share/ceph-ansible cp group_vars/all.yml.sample group_vars/all.yml cp group_vars/osds.yml.sample group_vars/osds.yml cp site.yml.sample site.yml",
"ceph_origin: repository ceph_repository: rhcs ceph_repository_type: cdn ceph_rhcs_version: 3 monitor_interface: eth0 public_network: 192.168.122.0/24",
"osd_scenario: collocated devices: - /dev/sdb dmcrypt: true osd_auto_discovery: false",
"[mons] ceph01 ceph02 ceph03 [osds] ceph01 devices=\"[ '/dev/sdb' ]\" ceph02 devices=\"[ '/dev/sdb' ]\" ceph03 devices=\"[ '/dev/sdb' ]\" [mgrs] ceph01 devices=\"[ '/dev/sdb' ]\" ceph02 devices=\"[ '/dev/sdb' ]\" ceph03 devices=\"[ '/dev/sdb' ]\"",
"retry_files_save_path = ~/",
"USER_NAME=ansibleadmin sudo su - USDUSER_NAME [ansibleadmin@ceph05 ~]USD ansible all -m ping ceph01 | SUCCESS => { \"changed\": false, \"ping\": \"pong\" } ceph02 | SUCCESS => { \"changed\": false, \"ping\": \"pong\" } ceph03 | SUCCESS => { \"changed\": false, \"ping\": \"pong\" } [ansibleadmin@ceph05 ~]USD",
"[ansibleadmin@ceph05 ~]USD cd /usr/share/ceph-ansible/ [ansibleadmin@ceph05 ~]USD ansible-playbook site.yml",
"ceph health HEALTH_OK",
"ceph osd pool create test 8 echo 'Hello World!' > hello-world.txt rados --pool test put hello-world hello-world.txt rados --pool test get hello-world fetch.txt cat fetch.txt Hello World!",
"mkdir -p /var/lib/redis chmod 777 /var/lib/redis sudo podman run -d -p 6379:6379 -v /var/lib/redis:/var/lib/redis/data:Z registry.redhat.io/rhel8/redis-5",
"yum install telnet -y telnet 192.168.122.99 6379 Trying 192.168.122.99 Connected to 192.168.122.99. Escape character is '^]'. MONITOR +OK +1525703165.754099 [0 172.17.0.1:43848] \"PING\" QUIT +OK Connection closed by foreign host."
] | https://docs.redhat.com/en/documentation/red_hat_quay/3.13/html/deploy_red_hat_quay_-_high_availability/preparing_for_red_hat_quay_high_availability |
Chapter 19. Tuning for Performance | Chapter 19. Tuning for Performance This chapter provides information on configuring Red Hat Gluster Storage and explains clear and simple activities that can improve system performance. 19.1. Disk Configuration Red Hat Gluster Storage supports JBOD (Just a Bunch of Disks) and hardware RAID storage. 19.1.1. Hardware RAID The RAID levels that are most commonly recommended are RAID 6 and RAID 10. RAID 6 provides better space efficiency, good read performance and good performance for sequential writes to large files. When configured across 12 disks, RAID 6 can provide ~40% more storage space in comparison to RAID 10, which has a 50% reduction in capacity. However, RAID 6 performance for small file writes and random writes tends to be lower than RAID 10. If the workload is strictly small files, then RAID 10 is the optimal configuration. An important parameter in hardware RAID configuration is the stripe unit size. With thin provisioned disks, the choice of RAID stripe unit size is closely related to the choice of thin-provisioning chunk size. For RAID 10, a stripe unit size of 256 KiB is recommended. For RAID 6, the stripe unit size must be chosen such that the full stripe size (stripe unit * number of data disks) is between 1 MiB and 2 MiB, preferably in the lower end of the range. Hardware RAID controllers usually allow stripe unit sizes that are a power of 2. For RAID 6 with 12 disks (10 data disks), the recommended stripe unit size is 128KiB. 19.1.2. JBOD In the JBOD configuration, physical disks are not aggregated into RAID devices, but are visible as separate disks to the operating system. This simplifies system configuration by not requiring a hardware RAID controller. If disks on the system are connected through a hardware RAID controller, refer to the RAID controller documentation on how to create a JBOD configuration; typically, JBOD is realized by exposing raw drives to the operating system using a pass-through mode. In the JBOD configuration, a single physical disk serves as storage for a Red Hat Gluster Storage brick. JBOD configurations support up to 36 disks per node with dispersed volumes and three-way replication. | null | https://docs.redhat.com/en/documentation/red_hat_gluster_storage/3.5/html/administration_guide/chap-configuring_red_hat_storage_for_enhancing_performance |
4.2. Networking | 4.2. Networking linuxptp The linuxptp package, included in Red Hat Enterprise Linux 6.4 as a Technology Preview, is an implementation of the Precision Time Protocol (PTP) according to IEEE standard 1588 for Linux. The dual design goals are to provide a robust implementation of the standard and to use the most relevant and modern Application Programming Interfaces (API) offered by the Linux kernel. Supporting legacy APIs and other platforms is not a goal. Package: linuxptp-0-0.6.20121114gite6bbbb PTP support in kernel drivers PTP support has been added as a technology preview to the ixgbe, igb, and tg3 kernel drivers. Packages: kernel-2.6.32-335 QFQ queuing discipline In Red Hat Enterprise Linux 6, the tc utility has been updated to work with the Quick Fair Scheduler (QFQ) kernel features. Users can now take advantage of the new QFQ traffic queuing discipline from userspace. This feature is considered a Technology Preview. Package: kernel-2.6.32-358 vios-proxy, BZ# 721119 vios-proxy is a stream-socket proxy for providing connectivity between a client on a virtual guest and a server on a Hypervisor host. Communication occurs over virtio-serial links. Package: vios-proxy-0.1-1 IPv6 support in IPVS The IPv6 support in IPVS (IP Virtual Server) is considered a Technology Preview. Package: kernel-2.6.32-358 | null | https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/6.4_technical_notes/networking_tp |
Making open source more inclusive | Making open source more inclusive Red Hat is committed to replacing problematic language in our code, documentation, and web properties. We are beginning with these four terms: master, slave, blacklist, and whitelist. Because of the enormity of this endeavor, these changes will be implemented gradually over several upcoming releases. For more details, see our CTO Chris Wright's message . | null | https://docs.redhat.com/en/documentation/red_hat_build_of_quarkus/3.8/html/security_architecture/making-open-source-more-inclusive |
Service Mesh | Service Mesh OpenShift Container Platform 4.14 Service Mesh installation, usage, and release notes Red Hat OpenShift Documentation Team | [
"apiVersion: maistra.io/v2 kind: ServiceMeshControlPlane metadata: name: basic spec: runtime: components: pilot: container: env: ENABLE_NATIVE_SIDECARS: \"true\"",
"apiVersion: maistra.io/v2 kind: ServiceMeshControlPlane metadata: name: basic spec: runtime: components: pilot: container: env: PILOT_ENABLE_GATEWAY_API: \"false\"",
"apiVersion: maistra.io/v2 kind: ServiceMeshControlPlane spec: gateways: openshiftRoute: enabled: true",
"spec: meshConfig discoverySelectors: - matchLabels: env: prod region: us-east1 - matchExpressions: - key: app operator: In values: - cassandra - spark",
"spec: meshConfig: extensionProviders: - name: prometheus prometheus: {} --- apiVersion: telemetry.istio.io/v1alpha1 kind: Telemetry metadata: name: enable-prometheus-metrics spec: metrics: - providers: - name: prometheus",
"spec: techPreview: gatewayAPI: enabled: true",
"spec: runtime: components: pilot: container: env: PILOT_ENABLE_GATEWAY_API: \"true\" PILOT_ENABLE_GATEWAY_API_STATUS: \"true\" PILOT_ENABLE_GATEWAY_API_DEPLOYMENT_CONTROLLER: \"true\"",
"apiVersion: maistra.io/v2 kind: ServiceMeshControlPlane metadata: name: cluster-wide namespace: istio-system spec: version: v2.3 techPreview: controlPlaneMode: ClusterScoped 1",
"apiVersion: maistra.io/v1 kind: ServiceMeshMemberRoll metadata: name: default spec: members: - '*' 1",
"kubectl get crd gateways.gateway.networking.k8s.io || { kubectl kustomize \"github.com/kubernetes-sigs/gateway-api/config/crd?ref=v0.4.0\" | kubectl apply -f -; }",
"spec: runtime: components: pilot: container: env: PILOT_ENABLE_GATEWAY_API: \"true\" PILOT_ENABLE_GATEWAY_API_STATUS: \"true\" # and optionally, for the deployment controller PILOT_ENABLE_GATEWAY_API_DEPLOYMENT_CONTROLLER: \"true\"",
"apiVersion: gateway.networking.k8s.io/v1alpha2 kind: Gateway metadata: name: gateway spec: addresses: - value: ingress.istio-gateways.svc.cluster.local type: Hostname",
"apiVersion: maistra.io/v2 kind: ServiceMeshControlPlane spec: security: trust: manageNetworkPolicy: false",
"apiVersion: maistra.io/v2 kind: ServiceMeshControlPlane metadata: name: basic spec: techPreview: meshConfig: defaultConfig: proxyMetadata: HTTP_STRIP_FRAGMENT_FROM_PATH_UNSAFE_IF_DISABLED: \"false\"",
"apiVersion: security.istio.io/v1beta1 kind: AuthorizationPolicy metadata: name: httpbin namespace: foo spec: action: DENY rules: - from: - source: namespaces: [\"dev\"] to: - operation: hosts: [\"httpbin.com\",\"httpbin.com:*\"]",
"apiVersion: security.istio.io/v1beta1 kind: AuthorizationPolicy metadata: name: httpbin namespace: default spec: action: DENY rules: - to: - operation: hosts: [\"httpbin.example.com:*\"]",
"spec: techPreview: global: pathNormalization: <option>",
"oc create -f <myEnvoyFilterFile>",
"apiVersion: networking.istio.io/v1alpha3 kind: EnvoyFilter metadata: name: ingress-case-insensitive namespace: istio-system spec: configPatches: - applyTo: HTTP_FILTER match: context: GATEWAY listener: filterChain: filter: name: \"envoy.filters.network.http_connection_manager\" subFilter: name: \"envoy.filters.http.router\" patch: operation: INSERT_BEFORE value: name: envoy.lua typed_config: \"@type\": \"type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua\" inlineCode: | function envoy_on_request(request_handle) local path = request_handle:headers():get(\":path\") request_handle:headers():replace(\":path\", string.lower(path)) end",
"apiVersion: maistra.io/v2 kind: ServiceMeshControlPlane metadata: name: basic namespace: istio-system spec: mode: ClusterWide meshConfig: discoverySelectors: - matchLabels: istio-discovery: enabled gateways: ingress: enabled: true",
"label namespace istio-system istio-discovery=enabled",
"2023-05-02T15:20:42.541034Z error watch error in cluster Kubernetes: failed to list *v1alpha2.TLSRoute: the server could not find the requested resource (get tlsroutes.gateway.networking.k8s.io) 2023-05-02T15:20:42.616450Z info kube controller \"gateway.networking.k8s.io/v1alpha2/TCPRoute\" is syncing",
"kubectl get crd gateways.gateway.networking.k8s.io || { kubectl kustomize \"github.com/kubernetes-sigs/gateway-api/config/crd/experimental?ref=v0.5.1\" | kubectl apply -f -; }",
"apiVersion: networking.istio.io/v1beta1 kind: ProxyConfig metadata: name: mesh-wide-concurrency namespace: <istiod-namespace> spec: concurrency: 0",
"api: namespaces: exclude: - \"^istio-operator\" - \"^kube-.*\" - \"^openshift.*\" - \"^ibm.*\" - \"^kiali-operator\"",
"spec: proxy: networking: trafficControl: inbound: excludedPorts: - 15020",
"spec: runtime: components: pilot: container: env: APPLY_WASM_PLUGINS_TO_INBOUND_ONLY: \"true\"",
"error Installer exits with open /host/etc/cni/multus/net.d/v2-2-istio-cni.kubeconfig.tmp.841118073: no such file or directory",
"oc label namespace istio-system maistra.io/ignore-namespace-",
"apiVersion: maistra.io/v2 kind: ServiceMeshControlPlane spec: gateways: openshiftRoute: enabled: true",
"An error occurred admission webhook smcp.validation.maistra.io denied the request: [support for policy.type \"Mixer\" and policy.Mixer options have been removed in v2.1, please use another alternative, support for telemetry.type \"Mixer\" and telemetry.Mixer options have been removed in v2.1, please use another alternative]\"",
"apiVersion: maistra.io/v2 kind: ServiceMeshControlPlane spec: policy: type: Istiod telemetry: type: Istiod version: v2.6",
"oc project istio-system",
"oc get smcp -o yaml",
"apiVersion: maistra.io/v2 kind: ServiceMeshControlPlane metadata: name: basic spec: version: v2.6",
"oc get smcp -o yaml",
"oc get smcp.v1.maistra.io <smcp_name> > smcp-resource.yaml #Edit the smcp-resource.yaml file. oc replace -f smcp-resource.yaml",
"oc patch smcp.v1.maistra.io <smcp_name> --type json --patch '[{\"op\": \"replace\",\"path\":\"/spec/path/to/bad/setting\",\"value\":\"corrected-value\"}]'",
"oc edit smcp.v1.maistra.io <smcp_name>",
"oc project istio-system",
"oc get servicemeshcontrolplanes.v1.maistra.io <smcp_name> -o yaml > <smcp_name>.v1.yaml",
"oc get smcp <smcp_name> -o yaml > <smcp_name>.v2.yaml",
"oc new-project istio-system-upgrade",
"oc create -n istio-system-upgrade -f <smcp_name>.v2.yaml",
"spec: policy: type: Mixer",
"spec: telemetry: type: Mixer",
"apiVersion: authentication.istio.io/v1alpha1 kind: Policy metadata: name: productpage-mTLS-disable namespace: <namespace> spec: targets: - name: productpage",
"apiVersion: security.istio.io/v1beta1 kind: PeerAuthentication metadata: name: productpage-mTLS-disable namespace: <namespace> spec: mtls: mode: DISABLE selector: matchLabels: # this should match the selector for the \"productpage\" service app: productpage",
"apiVersion: authentication.istio.io/v1alpha1 kind: Policy metadata: name: productpage-mTLS-with-JWT namespace: <namespace> spec: targets: - name: productpage ports: - number: 9000 peers: - mtls: origins: - jwt: issuer: \"https://securetoken.google.com\" audiences: - \"productpage\" jwksUri: \"https://www.googleapis.com/oauth2/v1/certs\" jwtHeaders: - \"x-goog-iap-jwt-assertion\" triggerRules: - excludedPaths: - exact: /health_check principalBinding: USE_ORIGIN",
"#require mtls for productpage:9000 apiVersion: security.istio.io/v1beta1 kind: PeerAuthentication metadata: name: productpage-mTLS-with-JWT namespace: <namespace> spec: selector: matchLabels: # this should match the selector for the \"productpage\" service app: productpage portLevelMtls: 9000: mode: STRICT --- #JWT authentication for productpage apiVersion: security.istio.io/v1beta1 kind: RequestAuthentication metadata: name: productpage-mTLS-with-JWT namespace: <namespace> spec: selector: matchLabels: # this should match the selector for the \"productpage\" service app: productpage jwtRules: - issuer: \"https://securetoken.google.com\" audiences: - \"productpage\" jwksUri: \"https://www.googleapis.com/oauth2/v1/certs\" fromHeaders: - name: \"x-goog-iap-jwt-assertion\" --- #Require JWT token to access product page service from #any client to all paths except /health_check apiVersion: security.istio.io/v1beta1 kind: AuthorizationPolicy metadata: name: productpage-mTLS-with-JWT namespace: <namespace> spec: action: ALLOW selector: matchLabels: # this should match the selector for the \"productpage\" service app: productpage rules: - to: # require JWT token to access all other paths - operation: notPaths: - /health_check from: - source: # if using principalBinding: USE_PEER in the Policy, # then use principals, e.g. # principals: # - \"*\" requestPrincipals: - \"*\" - to: # no JWT token required to access health_check - operation: paths: - /health_check",
"spec: tracing: sampling: 100 # 1% type: Jaeger",
"spec: addons: jaeger: name: jaeger install: storage: type: Memory # or Elasticsearch for production mode memory: maxTraces: 100000 elasticsearch: # the following values only apply if storage:type:=Elasticsearch storage: # specific storageclass configuration for the Jaeger Elasticsearch (optional) size: \"100G\" storageClassName: \"storageclass\" nodeCount: 3 redundancyPolicy: SingleRedundancy runtime: components: tracing.jaeger: {} # general Jaeger specific runtime configuration (optional) tracing.jaeger.elasticsearch: #runtime configuration for Jaeger Elasticsearch deployment (optional) container: resources: requests: memory: \"1Gi\" cpu: \"500m\" limits: memory: \"1Gi\"",
"spec: addons: grafana: enabled: true install: {} # customize install kiali: enabled: true name: kiali install: {} # customize install",
"oc rollout restart <deployment>",
"apiVersion: maistra.io/v2 kind: ServiceMeshControlPlane metadata: name: basic spec: mode: ClusterWide meshConfig: discoverySelectors: - matchLabels: istio-discovery: enabled 1 - matchExpressions: - key: kubernetes.io/metadata.name 2 operator: In values: - bookinfo - httpbin - istio-system",
"oc -n istio-system edit smcp <name> 1",
"apiVersion: maistra.io/v2 kind: ServiceMeshControlPlane metadata: name: basic spec: mode: ClusterWide meshConfig: discoverySelectors: - matchLabels: istio-discovery: enabled 1 - matchExpressions: - key: kubernetes.io/metadata.name 2 operator: In values: - bookinfo - httpbin - istio-system",
"apiVersion: maistra.io/v1 kind: ServiceMeshMemberRoll metadata: name: default spec: memberSelectors: - matchLabels: istio-injection: enabled 1",
"oc edit smmr -n <controlplane-namespace>",
"apiVersion: maistra.io/v1 kind: ServiceMeshMemberRoll metadata: name: default spec: memberSelectors: - matchLabels: istio-injection: enabled 1",
"apiVersion: apps/v1 kind: Deployment metadata: name: nginx spec: selector: matchLabels: app: nginx template: metadata: annotations: sidecar.istio.io/inject: 'true' 1 labels: app: nginx spec: containers: - name: nginx image: nginx:1.14.2 ports: - containerPort: 80 --- apiVersion: apps/v1 kind: Deployment metadata: name: nginx-without-sidecar spec: selector: matchLabels: app: nginx-without-sidecar template: metadata: labels: app: nginx-without-sidecar 2 spec: containers: - name: nginx image: nginx:1.14.2 ports: - containerPort: 80",
"oc edit deployment -n <namespace> <deploymentName>",
"apiVersion: apps/v1 kind: Deployment metadata: name: nginx spec: selector: matchLabels: app: nginx template: metadata: annotations: sidecar.istio.io/inject: 'true' 1 labels: app: nginx spec: containers: - name: nginx image: nginx:1.14.2 ports: - containerPort: 80 --- apiVersion: apps/v1 kind: Deployment metadata: name: nginx-without-sidecar spec: selector: matchLabels: app: nginx-without-sidecar template: metadata: labels: app: nginx-without-sidecar 2 spec: containers: - name: nginx image: nginx:1.14.2 ports: - containerPort: 80",
"apiVersion: security.istio.io/v1beta1 kind: AuthorizationPolicy metadata: name: httpbin-usernamepolicy spec: action: ALLOW rules: - when: - key: 'request.regex.headers[username]' values: - \"allowed.*\" selector: matchLabels: app: httpbin",
"oc -n openshift-operators get subscriptions",
"oc -n openshift-operators edit subscription <name> 1",
"apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: labels: operators.coreos.com/servicemeshoperator.openshift-operators: \"\" name: servicemeshoperator namespace: openshift-operators spec: config: nodeSelector: 1 node-role.kubernetes.io/infra: \"\" tolerations: 2 - effect: NoSchedule key: node-role.kubernetes.io/infra value: reserved - effect: NoExecute key: node-role.kubernetes.io/infra value: reserved",
"oc -n openshift-operators get po -l name=istio-operator -owide",
"oc new-project istio-system",
"apiVersion: maistra.io/v2 kind: ServiceMeshControlPlane metadata: name: basic namespace: istio-system spec: version: v2.6 tracing: type: None sampling: 10000 addons: kiali: enabled: true name: kiali grafana: enabled: true",
"oc create -n istio-system -f <istio_installation.yaml>",
"oc get pods -n istio-system -w",
"NAME READY STATUS RESTARTS AGE grafana-b4d59bd7-mrgbr 2/2 Running 0 65m istio-egressgateway-678dc97b4c-wrjkp 1/1 Running 0 108s istio-ingressgateway-b45c9d54d-4qg6n 1/1 Running 0 108s istiod-basic-55d78bbbcd-j5556 1/1 Running 0 108s kiali-6476c7656c-x5msp 1/1 Running 0 43m prometheus-58954b8d6b-m5std 2/2 Running 0 66m",
"oc get smcp -n istio-system",
"NAME READY STATUS PROFILES VERSION AGE basic 10/10 ComponentsReady [\"default\"] 2.6.6 66m",
"spec: runtime: defaults: pod: nodeSelector: 1 node-role.kubernetes.io/infra: \"\" tolerations: 2 - effect: NoSchedule key: node-role.kubernetes.io/infra value: reserved - effect: NoExecute key: node-role.kubernetes.io/infra value: reserved",
"spec: runtime: components: pilot: pod: nodeSelector: 1 node-role.kubernetes.io/infra: \"\" tolerations: 2 - effect: NoSchedule key: node-role.kubernetes.io/infra value: reserved - effect: NoExecute key: node-role.kubernetes.io/infra value: reserved",
"spec: gateways: ingress: runtime: pod: nodeSelector: 1 node-role.kubernetes.io/infra: \"\" tolerations: 2 - effect: NoSchedule key: node-role.kubernetes.io/infra value: reserved - effect: NoExecute key: node-role.kubernetes.io/infra value: reserved egress: runtime: pod: nodeSelector: 3 node-role.kubernetes.io/infra: \"\" tolerations: 4 - effect: NoSchedule key: node-role.kubernetes.io/infra value: reserved - effect: NoExecute key: node-role.kubernetes.io/infra value: reserved",
"oc -n istio-system edit smcp <name> 1",
"spec: runtime: defaults: pod: nodeSelector: 1 node-role.kubernetes.io/infra: \"\" tolerations: 2 - effect: NoSchedule key: node-role.kubernetes.io/infra value: reserved - effect: NoExecute key: node-role.kubernetes.io/infra value: reserved",
"oc -n istio-system edit smcp <name> 1",
"spec: runtime: components: pilot: pod: nodeSelector: 1 node-role.kubernetes.io/infra: \"\" tolerations: 2 - effect: NoSchedule key: node-role.kubernetes.io/infra value: reserved - effect: NoExecute key: node-role.kubernetes.io/infra value: reserved",
"spec: gateways: ingress: runtime: pod: nodeSelector: 1 node-role.kubernetes.io/infra: \"\" tolerations: 2 - effect: NoSchedule key: node-role.kubernetes.io/infra value: reserved - effect: NoExecute key: node-role.kubernetes.io/infra value: reserved egress: runtime: pod: nodeSelector: 3 node-role.kubernetes.io/infra: \"\" tolerations: 4 - effect: NoSchedule key: node-role.kubernetes.io/infra value: reserved - effect: NoExecute key: node-role.kubernetes.io/infra value: reserved",
"oc -n istio-system get pods -owide",
"apiVersion: maistra.io/v2 kind: ServiceMeshControlPlane metadata: name: basic namespace: istio-system spec: version: v2.6 mode: ClusterWide",
"oc new-project istio-system",
"apiVersion: maistra.io/v2 kind: ServiceMeshControlPlane metadata: name: basic namespace: istio-system spec: version: v2.6 mode: ClusterWide",
"oc create -n istio-system -f <istio_installation.yaml>",
"oc get pods -n istio-system -w",
"NAME READY STATUS RESTARTS AGE grafana-b4d59bd7-mrgbr 2/2 Running 0 65m istio-egressgateway-678dc97b4c-wrjkp 1/1 Running 0 108s istio-ingressgateway-b45c9d54d-4qg6n 1/1 Running 0 108s istiod-basic-55d78bbbcd-j5556 1/1 Running 0 108s jaeger-67c75bd6dc-jv6k6 2/2 Running 0 65m kiali-6476c7656c-x5msp 1/1 Running 0 43m prometheus-58954b8d6b-m5std 2/2 Running 0 66m",
"oc login --username=<NAMEOFUSER> https://<HOSTNAME>:6443",
"oc new-project <your-project>",
"apiVersion: maistra.io/v1 kind: ServiceMeshMemberRoll metadata: name: default namespace: istio-system spec: members: # a list of projects joined into the service mesh - your-project-name - another-project-name",
"oc create -n istio-system -f servicemeshmemberroll-default.yaml",
"oc get smmr -n istio-system default",
"apiVersion: maistra.io/v1 kind: ServiceMeshMemberRoll metadata: name: default namespace: istio-system #control plane project spec: members: # a list of projects joined into the service mesh - your-project-name - another-project-name",
"oc edit smmr -n <controlplane-namespace>",
"apiVersion: maistra.io/v1 kind: ServiceMeshMemberRoll metadata: name: default namespace: istio-system #control plane project spec: members: # a list of projects joined into the service mesh - your-project-name - another-project-name",
"apiVersion: maistra.io/v1 kind: ServiceMeshMember metadata: name: default namespace: my-application spec: controlPlaneRef: namespace: istio-system name: basic",
"oc apply -f <file-name>",
"oc get smm default -n my-application",
"NAME CONTROL PLANE READY AGE default istio-system/basic True 2m11s",
"oc describe smmr default -n istio-system",
"Name: default Namespace: istio-system Labels: <none> Status: Configured Members: default my-application Members: default my-application",
"oc edit smmr default -n istio-system",
"apiVersion: maistra.io/v1 kind: ServiceMeshMemberRoll metadata: name: default namespace: istio-system spec: memberSelectors: 1 - matchLabels: 2 mykey: myvalue 3 - matchLabels: 4 myotherkey: myothervalue 5",
"oc new-project bookinfo",
"apiVersion: maistra.io/v1 kind: ServiceMeshMemberRoll metadata: name: default spec: members: - bookinfo",
"oc create -n istio-system -f servicemeshmemberroll-default.yaml",
"oc get smmr -n istio-system -o wide",
"NAME READY STATUS AGE MEMBERS default 1/1 Configured 70s [\"bookinfo\"]",
"oc apply -n bookinfo -f https://raw.githubusercontent.com/Maistra/istio/maistra-2.6/samples/bookinfo/platform/kube/bookinfo.yaml",
"service/details created serviceaccount/bookinfo-details created deployment.apps/details-v1 created service/ratings created serviceaccount/bookinfo-ratings created deployment.apps/ratings-v1 created service/reviews created serviceaccount/bookinfo-reviews created deployment.apps/reviews-v1 created deployment.apps/reviews-v2 created deployment.apps/reviews-v3 created service/productpage created serviceaccount/bookinfo-productpage created deployment.apps/productpage-v1 created",
"oc apply -n bookinfo -f https://raw.githubusercontent.com/Maistra/istio/maistra-2.6/samples/bookinfo/networking/bookinfo-gateway.yaml",
"gateway.networking.istio.io/bookinfo-gateway created virtualservice.networking.istio.io/bookinfo created",
"export GATEWAY_URL=USD(oc -n istio-system get route istio-ingressgateway -o jsonpath='{.spec.host}')",
"oc apply -n bookinfo -f https://raw.githubusercontent.com/Maistra/istio/maistra-2.6/samples/bookinfo/networking/destination-rule-all.yaml",
"oc apply -n bookinfo -f https://raw.githubusercontent.com/Maistra/istio/maistra-2.6/samples/bookinfo/networking/destination-rule-all-mtls.yaml",
"destinationrule.networking.istio.io/productpage created destinationrule.networking.istio.io/reviews created destinationrule.networking.istio.io/ratings created destinationrule.networking.istio.io/details created",
"oc get pods -n bookinfo",
"NAME READY STATUS RESTARTS AGE details-v1-55b869668-jh7hb 2/2 Running 0 12m productpage-v1-6fc77ff794-nsl8r 2/2 Running 0 12m ratings-v1-7d7d8d8b56-55scn 2/2 Running 0 12m reviews-v1-868597db96-bdxgq 2/2 Running 0 12m reviews-v2-5b64f47978-cvssp 2/2 Running 0 12m reviews-v3-6dfd49b55b-vcwpf 2/2 Running 0 12m",
"echo \"http://USDGATEWAY_URL/productpage\"",
"oc delete project bookinfo",
"oc -n istio-system patch --type='json' smmr default -p '[{\"op\": \"remove\", \"path\": \"/spec/members\", \"value\":[\"'\"bookinfo\"'\"]}]'",
"oc get deployment -n <namespace>",
"get deployment -n bookinfo ratings-v1 -o yaml",
"apiVersion: apps/v1 kind: Deployment metadata: name: ratings-v1 namespace: bookinfo labels: app: ratings version: v1 spec: template: metadata: labels: sidecar.istio.io/inject: 'true'",
"oc apply -n <namespace> -f deployment.yaml",
"oc apply -n bookinfo -f deployment-ratings-v1.yaml",
"oc get deployment -n <namespace> <deploymentName> -o yaml",
"oc get deployment -n bookinfo ratings-v1 -o yaml",
"apiVersion: apps/v1 kind: Deployment metadata: name: resource spec: replicas: 7 selector: matchLabels: app: resource template: metadata: annotations: sidecar.maistra.io/proxyEnv: \"{ \\\"maistra_test_env\\\": \\\"env_value\\\", \\\"maistra_test_env_2\\\": \\\"env_value_2\\\" }\"",
"oc patch deployment/<deployment> -p '{\"spec\":{\"template\":{\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/restartedAt\": \"'`date -Iseconds`'\"}}}}}'",
"oc policy add-role-to-user -n istio-system --role-namespace istio-system mesh-user <user_name>",
"apiVersion: maistra.io/v1 kind: ServiceMeshMember metadata: name: default spec: controlPlaneRef: namespace: istio-system name: basic",
"oc policy add-role-to-user",
"apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: namespace: istio-system name: mesh-users roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: mesh-user subjects: - apiGroup: rbac.authorization.k8s.io kind: User name: alice",
"oc create configmap --from-file=<profiles-directory> smcp-templates -n openshift-operators",
"apiVersion: maistra.io/v2 kind: ServiceMeshControlPlane metadata: name: basic spec: profiles: - default",
"apiVersion: maistra.io/v2 kind: ServiceMeshControlPlane spec: version: v2.6 security: dataPlane: mtls: true",
"apiVersion: security.istio.io/v1beta1 kind: PeerAuthentication metadata: name: default namespace: <namespace> spec: mtls: mode: STRICT",
"oc create -n <namespace> -f <policy.yaml>",
"apiVersion: networking.istio.io/v1alpha3 kind: DestinationRule metadata: name: default namespace: <namespace> spec: host: \"*.<namespace>.svc.cluster.local\" trafficPolicy: tls: mode: ISTIO_MUTUAL",
"oc create -n <namespace> -f <destination-rule.yaml>",
"kind: ServiceMeshControlPlane spec: security: controlPlane: tls: minProtocolVersion: TLSv1_2",
"apiVersion: security.istio.io/v1beta1 kind: AuthorizationPolicy metadata: name: ingress-policy namespace: istio-system spec: selector: matchLabels: app: istio-ingressgateway action: DENY rules: - from: - source: ipBlocks: [\"1.2.3.4\"]",
"oc create -n istio-system -f <filename>",
"apiVersion: security.istio.io/v1beta1 kind: AuthorizationPolicy metadata: name: httpbin-deny namespace: bookinfo spec: selector: matchLabels: app: httpbin version: v1 action: DENY rules: - from: - source: notNamespaces: [\"bookinfo\"]",
"apiVersion: security.istio.io/v1beta1 kind: AuthorizationPolicy metadata: name: allow-all namespace: bookinfo spec: action: ALLOW rules: - {}",
"apiVersion: security.istio.io/v1beta1 kind: AuthorizationPolicy metadata: name: deny-all namespace: bookinfo spec: {}",
"apiVersion: security.istio.io/v1beta1 kind: AuthorizationPolicy metadata: name: ingress-policy namespace: istio-system spec: selector: matchLabels: app: istio-ingressgateway action: ALLOW rules: - from: - source: ipBlocks: [\"1.2.3.4\", \"5.6.7.0/24\"]",
"apiVersion: \"security.istio.io/v1beta1\" kind: \"RequestAuthentication\" metadata: name: \"jwt-example\" namespace: bookinfo spec: selector: matchLabels: app: httpbin jwtRules: - issuer: \"http://localhost:8080/auth/realms/master\" jwksUri: \"http://keycloak.default.svc:8080/auth/realms/master/protocol/openid-connect/certs\"",
"apiVersion: \"security.istio.io/v1beta1\" kind: \"AuthorizationPolicy\" metadata: name: \"frontend-ingress\" namespace: bookinfo spec: selector: matchLabels: app: httpbin action: DENY rules: - from: - source: notRequestPrincipals: [\"*\"]",
"oc edit smcp <smcp-name>",
"spec: security: dataPlane: mtls: true # enable mtls for data plane # JWKSResolver extra CA # PEM-encoded certificate content to trust an additional CA jwksResolverCA: | -----BEGIN CERTIFICATE----- [...] [...] -----END CERTIFICATE-----",
"kind: ConfigMap apiVersion: v1 data: extra.pem: | -----BEGIN CERTIFICATE----- [...] [...] -----END CERTIFICATE-----",
"oc create secret generic cacerts -n istio-system --from-file=<path>/ca-cert.pem --from-file=<path>/ca-key.pem --from-file=<path>/root-cert.pem --from-file=<path>/cert-chain.pem",
"apiVersion: maistra.io/v2 kind: ServiceMeshControlPlane spec: security: dataPlane: mtls: true certificateAuthority: type: Istiod istiod: type: PrivateKey privateKey: rootCADir: /etc/cacerts",
"oc -n istio-system delete pods -l 'app in (istiod,istio-ingressgateway, istio-egressgateway)'",
"oc -n bookinfo delete pods --all",
"pod \"details-v1-6cd699df8c-j54nh\" deleted pod \"productpage-v1-5ddcb4b84f-mtmf2\" deleted pod \"ratings-v1-bdbcc68bc-kmng4\" deleted pod \"reviews-v1-754ddd7b6f-lqhsv\" deleted pod \"reviews-v2-675679877f-q67r2\" deleted pod \"reviews-v3-79d7549c7-c2gjs\" deleted",
"oc get pods -n bookinfo",
"sleep 60 oc -n bookinfo exec \"USD(oc -n bookinfo get pod -l app=productpage -o jsonpath={.items..metadata.name})\" -c istio-proxy -- openssl s_client -showcerts -connect details:9080 > bookinfo-proxy-cert.txt sed -n '/-----BEGIN CERTIFICATE-----/{:start /-----END CERTIFICATE-----/!{N;b start};/.*/p}' bookinfo-proxy-cert.txt > certs.pem awk 'BEGIN {counter=0;} /BEGIN CERT/{counter++} { print > \"proxy-cert-\" counter \".pem\"}' < certs.pem",
"openssl x509 -in <path>/root-cert.pem -text -noout > /tmp/root-cert.crt.txt",
"openssl x509 -in ./proxy-cert-3.pem -text -noout > /tmp/pod-root-cert.crt.txt",
"diff -s /tmp/root-cert.crt.txt /tmp/pod-root-cert.crt.txt",
"openssl x509 -in <path>/ca-cert.pem -text -noout > /tmp/ca-cert.crt.txt",
"openssl x509 -in ./proxy-cert-2.pem -text -noout > /tmp/pod-cert-chain-ca.crt.txt",
"diff -s /tmp/ca-cert.crt.txt /tmp/pod-cert-chain-ca.crt.txt",
"openssl verify -CAfile <(cat <path>/ca-cert.pem <path>/root-cert.pem) ./proxy-cert-1.pem",
"oc delete secret cacerts -n istio-system",
"apiVersion: maistra.io/v2 kind: ServiceMeshControlPlane spec: security: dataPlane: mtls: true",
"apiVersion: cert-manager.io/v1 kind: Issuer metadata: name: selfsigned-root-issuer namespace: cert-manager spec: selfSigned: {} --- apiVersion: cert-manager.io/v1 kind: Certificate metadata: name: root-ca namespace: cert-manager spec: isCA: true duration: 21600h # 900d secretName: root-ca commonName: root-ca.my-company.net subject: organizations: - my-company.net issuerRef: name: selfsigned-root-issuer kind: Issuer group: cert-manager.io --- apiVersion: cert-manager.io/v1 kind: ClusterIssuer metadata: name: root-ca spec: ca: secretName: root-ca",
"oc apply -f cluster-issuer.yaml",
"apiVersion: cert-manager.io/v1 kind: Certificate metadata: name: istio-ca namespace: istio-system spec: isCA: true duration: 21600h secretName: istio-ca commonName: istio-ca.my-company.net subject: organizations: - my-company.net issuerRef: name: root-ca kind: ClusterIssuer group: cert-manager.io --- apiVersion: cert-manager.io/v1 kind: Issuer metadata: name: istio-ca namespace: istio-system spec: ca: secretName: istio-ca",
"oc apply -n istio-system -f istio-ca.yaml",
"helm install istio-csr jetstack/cert-manager-istio-csr -n istio-system -f deploy/examples/cert-manager/istio-csr/istio-csr.yaml",
"replicaCount: 2 image: repository: quay.io/jetstack/cert-manager-istio-csr tag: v0.6.0 pullSecretName: \"\" app: certmanager: namespace: istio-system issuer: group: cert-manager.io kind: Issuer name: istio-ca controller: configmapNamespaceSelector: \"maistra.io/member-of=istio-system\" leaderElectionNamespace: istio-system istio: namespace: istio-system revisions: [\"basic\"] server: maxCertificateDuration: 5m tls: certificateDNSNames: # This DNS name must be set in the SMCP spec.security.certificateAuthority.cert-manager.address - cert-manager-istio-csr.istio-system.svc",
"oc apply -f mesh.yaml -n istio-system",
"apiVersion: maistra.io/v2 kind: ServiceMeshControlPlane metadata: name: basic spec: addons: grafana: enabled: false kiali: enabled: false prometheus: enabled: false proxy: accessLogging: file: name: /dev/stdout security: certificateAuthority: cert-manager: address: cert-manager-istio-csr.istio-system.svc:443 type: cert-manager dataPlane: mtls: true identity: type: ThirdParty tracing: type: None --- apiVersion: maistra.io/v1 kind: ServiceMeshMemberRoll metadata: name: default spec: members: - httpbin - sleep",
"oc new-project <namespace>",
"oc apply -f https://raw.githubusercontent.com/maistra/istio/maistra-2.4/samples/httpbin/httpbin.yaml",
"oc apply -f https://raw.githubusercontent.com/maistra/istio/maistra-2.4/samples/sleep/sleep.yaml",
"oc exec \"USD(oc get pod -l app=sleep -n <namespace> -o jsonpath={.items..metadata.name})\" -c sleep -n <namespace> -- curl http://httpbin.<namespace>:8000/ip -s -o /dev/null -w \"%{http_code}\\n\"",
"200",
"oc apply -n <namespace> -f https://raw.githubusercontent.com/maistra/istio/maistra-2.4/samples/httpbin/httpbin-gateway.yaml",
"INGRESS_HOST=USD(oc -n istio-system get routes istio-ingressgateway -o jsonpath='{.spec.host}')",
"curl -s -I http://USDINGRESS_HOST/headers -o /dev/null -w \"%{http_code}\" -s",
"apiVersion: networking.istio.io/v1alpha3 kind: Gateway metadata: name: ext-host-gwy spec: selector: istio: ingressgateway # use istio default controller servers: - port: number: 443 name: https protocol: HTTPS hosts: - ext-host.example.com tls: mode: SIMPLE serverCertificate: /tmp/tls.crt privateKey: /tmp/tls.key",
"apiVersion: networking.istio.io/v1alpha3 kind: VirtualService metadata: name: virtual-svc spec: hosts: - ext-host.example.com gateways: - ext-host-gwy",
"apiVersion: v1 kind: Service metadata: name: istio-ingressgateway namespace: istio-ingress spec: type: ClusterIP selector: istio: ingressgateway ports: - name: http2 port: 80 targetPort: 8080 - name: https port: 443 targetPort: 8443 --- apiVersion: apps/v1 kind: Deployment metadata: name: istio-ingressgateway namespace: istio-ingress spec: selector: matchLabels: istio: ingressgateway template: metadata: annotations: inject.istio.io/templates: gateway labels: istio: ingressgateway sidecar.istio.io/inject: \"true\" 1 spec: containers: - name: istio-proxy image: auto 2",
"apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: istio-ingressgateway-sds namespace: istio-ingress rules: - apiGroups: [\"\"] resources: [\"secrets\"] verbs: [\"get\", \"watch\", \"list\"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: istio-ingressgateway-sds namespace: istio-ingress roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: istio-ingressgateway-sds subjects: - kind: ServiceAccount name: default",
"apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: gatewayingress namespace: istio-ingress spec: podSelector: matchLabels: istio: ingressgateway ingress: - {} policyTypes: - Ingress",
"apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler metadata: labels: istio: ingressgateway release: istio name: ingressgatewayhpa namespace: istio-ingress spec: maxReplicas: 5 metrics: - resource: name: cpu target: averageUtilization: 80 type: Utilization type: Resource minReplicas: 2 scaleTargetRef: apiVersion: apps/v1 kind: Deployment name: istio-ingressgateway",
"apiVersion: policy/v1 kind: PodDisruptionBudget metadata: labels: istio: ingressgateway release: istio name: ingressgatewaypdb namespace: istio-ingress spec: minAvailable: 1 selector: matchLabels: istio: ingressgateway",
"oc get svc istio-ingressgateway -n istio-system",
"export INGRESS_HOST=USD(oc -n istio-system get service istio-ingressgateway -o jsonpath='{.status.loadBalancer.ingress[0].ip}')",
"export INGRESS_PORT=USD(oc -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name==\"http2\")].port}')",
"export SECURE_INGRESS_PORT=USD(oc -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name==\"https\")].port}')",
"export TCP_INGRESS_PORT=USD(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name==\"tcp\")].port}')",
"export INGRESS_HOST=USD(oc -n istio-system get service istio-ingressgateway -o jsonpath='{.status.loadBalancer.ingress[0].hostname}')",
"export INGRESS_PORT=USD(oc -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name==\"http2\")].nodePort}')",
"export SECURE_INGRESS_PORT=USD(oc -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name==\"https\")].nodePort}')",
"export TCP_INGRESS_PORT=USD(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name==\"tcp\")].nodePort}')",
"apiVersion: networking.istio.io/v1alpha3 kind: Gateway metadata: name: bookinfo-gateway spec: selector: istio: ingressgateway servers: - port: number: 80 name: http protocol: HTTP hosts: - \"*\"",
"oc apply -f gateway.yaml",
"apiVersion: networking.istio.io/v1alpha3 kind: VirtualService metadata: name: bookinfo spec: hosts: - \"*\" gateways: - bookinfo-gateway http: - match: - uri: exact: /productpage - uri: prefix: /static - uri: exact: /login - uri: exact: /logout - uri: prefix: /api/v1/products route: - destination: host: productpage port: number: 9080",
"oc apply -f vs.yaml",
"export GATEWAY_URL=USD(oc -n istio-system get route istio-ingressgateway -o jsonpath='{.spec.host}')",
"export TARGET_PORT=USD(oc -n istio-system get route istio-ingressgateway -o jsonpath='{.spec.port.targetPort}')",
"curl -s -I \"USDGATEWAY_URL/productpage\"",
"apiVersion: networking.istio.io/v1alpha3 kind: Gateway metadata: name: gateway1 spec: selector: istio: ingressgateway servers: - port: number: 80 name: http protocol: HTTP hosts: - www.bookinfo.com - bookinfo.example.com",
"oc -n istio-system get routes",
"NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD gateway1-lvlfn bookinfo.example.com istio-ingressgateway <all> None gateway1-scqhv www.bookinfo.com istio-ingressgateway <all> None",
"apiVersion: maistra.io/v1alpha1 kind: ServiceMeshControlPlane metadata: namespace: istio-system spec: gateways: openshiftRoute: enabled: false",
"apiVersion: networking.istio.io/v1alpha3 kind: ServiceEntry metadata: name: svc-entry spec: hosts: - ext-svc.example.com ports: - number: 443 name: https protocol: HTTPS location: MESH_EXTERNAL resolution: DNS",
"apiVersion: networking.istio.io/v1alpha3 kind: DestinationRule metadata: name: ext-res-dr spec: host: ext-svc.example.com trafficPolicy: tls: mode: MUTUAL clientCertificate: /etc/certs/myclientcert.pem privateKey: /etc/certs/client_private_key.pem caCertificates: /etc/certs/rootcacerts.pem",
"apiVersion: networking.istio.io/v1alpha3 kind: VirtualService metadata: name: reviews spec: hosts: - reviews http: - match: - headers: end-user: exact: jason route: - destination: host: reviews subset: v2 - route: - destination: host: reviews subset: v3",
"oc apply -f <VirtualService.yaml>",
"spec: hosts:",
"spec: http: - match:",
"spec: http: - match: - destination:",
"apiVersion: networking.istio.io/v1alpha3 kind: DestinationRule metadata: name: my-destination-rule spec: host: my-svc trafficPolicy: loadBalancer: simple: RANDOM subsets: - name: v1 labels: version: v1 - name: v2 labels: version: v2 trafficPolicy: loadBalancer: simple: ROUND_ROBIN - name: v3 labels: version: v3",
"apiVersion: maistra.io/v2 kind: ServiceMeshControlPlane spec: security: manageNetworkPolicy: false",
"apiVersion: networking.istio.io/v1alpha3 kind: Sidecar metadata: name: default namespace: bookinfo spec: egress: - hosts: - \"./*\" - \"istio-system/*\"",
"oc apply -f sidecar.yaml",
"oc get sidecar",
"oc apply -f https://raw.githubusercontent.com/Maistra/istio/maistra-2.6/samples/bookinfo/networking/virtual-service-all-v1.yaml",
"oc get virtualservices -o yaml",
"export GATEWAY_URL=USD(oc -n istio-system get route istio-ingressgateway -o jsonpath='{.spec.host}')",
"echo \"http://USDGATEWAY_URL/productpage\"",
"oc apply -f https://raw.githubusercontent.com/Maistra/istio/maistra-2.6/samples/bookinfo/networking/virtual-service-reviews-test-v2.yaml",
"oc get virtualservice reviews -o yaml",
"apiVersion: apps/v1 kind: Deployment metadata: name: istio-ingressgateway-canary namespace: istio-system 1 spec: selector: matchLabels: app: istio-ingressgateway istio: ingressgateway template: metadata: annotations: inject.istio.io/templates: gateway labels: 2 app: istio-ingressgateway istio: ingressgateway sidecar.istio.io/inject: \"true\" spec: containers: - name: istio-proxy image: auto serviceAccountName: istio-ingressgateway --- apiVersion: v1 kind: ServiceAccount metadata: name: istio-ingressgateway namespace: istio-system --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: secret-reader namespace: istio-system rules: - apiGroups: [\"\"] resources: [\"secrets\"] verbs: [\"get\", \"watch\", \"list\"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: istio-ingressgateway-secret-reader namespace: istio-system roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: secret-reader subjects: - kind: ServiceAccount name: istio-ingressgateway --- apiVersion: networking.k8s.io/v1 kind: NetworkPolicy 3 metadata: name: gatewayingress namespace: istio-system spec: podSelector: matchLabels: istio: ingressgateway ingress: - {} policyTypes: - Ingress",
"oc scale -n istio-system deployment/<new_gateway_deployment> --replicas <new_number_of_replicas>",
"oc scale -n istio-system deployment/<old_gateway_deployment> --replicas <new_number_of_replicas>",
"oc label service -n istio-system istio-ingressgateway app.kubernetes.io/managed-by-",
"oc patch service -n istio-system istio-ingressgateway --type='json' -p='[{\"op\": \"remove\", \"path\": \"/metadata/ownerReferences\"}]'",
"oc patch smcp -n istio-system <smcp_name> --type='json' -p='[{\"op\": \"replace\", \"path\": \"/spec/gateways/ingress/enabled\", \"value\": false}]'",
"apiVersion: maistra.io/v2 kind: ServiceMeshControlPlane spec: gateways: openshiftRoute: enabled: false",
"kind: Route apiVersion: route.openshift.io/v1 metadata: name: example-gateway namespace: istio-system 1 spec: host: www.example.com to: kind: Service name: istio-ingressgateway 2 weight: 100 port: targetPort: http2 wildcardPolicy: None",
"oc login --username=<NAMEOFUSER> https://<HOSTNAME>:6443",
"oc project istio-system",
"oc get routes",
"NAME HOST/PORT SERVICES PORT TERMINATION bookinfo-gateway bookinfo-gateway-yourcompany.com istio-ingressgateway http2 grafana grafana-yourcompany.com grafana <all> reencrypt/Redirect istio-ingressgateway istio-ingress-yourcompany.com istio-ingressgateway 8080 jaeger jaeger-yourcompany.com jaeger-query <all> reencrypt kiali kiali-yourcompany.com kiali 20001 reencrypt/Redirect prometheus prometheus-yourcompany.com prometheus <all> reencrypt/Redirect",
"curl \"http://USDGATEWAY_URL/productpage\"",
"apiVersion: opentelemetry.io/v1alpha1 kind: OpenTelemetryCollector metadata: name: otel namespace: bookinfo 1 spec: mode: deployment config: | receivers: otlp: protocols: grpc: endpoint: 0.0.0.0:4317 exporters: otlp: endpoint: \"tempo-sample-distributor.tracing-system.svc.cluster.local:4317\" 2 tls: insecure: true service: pipelines: traces: receivers: [otlp] processors: [] exporters: [otlp]",
"oc logs -n bookinfo -l app.kubernetes.io/name=otel-collector",
"kind: ServiceMeshControlPlane apiVersion: maistra.io/v2 metadata: name: basic namespace: istio-system spec: addons: grafana: enabled: false kiali: enabled: true prometheus: enabled: true meshConfig: extensionProviders: - name: otel opentelemetry: port: 4317 service: otel-collector.bookinfo.svc.cluster.local policy: type: Istiod telemetry: type: Istiod version: v2.6",
"spec: tracing: type: None",
"apiVersion: telemetry.istio.io/v1alpha1 kind: Telemetry metadata: name: mesh-default namespace: istio-system spec: tracing: - providers: - name: otel randomSamplingPercentage: 100",
"apiVersion: kiali.io/v1alpha1 kind: Kiali spec: external_services: tracing: query_timeout: 30 1 enabled: true in_cluster_url: 'http://tempo-sample-query-frontend.tracing-system.svc.cluster.local:16685' url: '[Tempo query frontend Route url]' use_grpc: true 2",
"apiVersion: networking.istio.io/v1alpha3 kind: DestinationRule metadata: name: otel-disable-tls spec: host: \"otel-collector.bookinfo.svc.cluster.local\" trafficPolicy: tls: mode: DISABLE",
"apiVersion: networking.istio.io/v1alpha3 kind: DestinationRule metadata: name: tempo namespace: tracing-system-mtls spec: host: \"*.tracing-system-mtls.svc.cluster.local\" trafficPolicy: tls: mode: DISABLE",
"apiVersion: networking.istio.io/v1alpha3 kind: DestinationRule metadata: name: kiali namespace: istio-system spec: host: kiali.istio-system.svc.cluster.local trafficPolicy: tls: mode: DISABLE",
"spec: addons: jaeger: name: distr-tracing-production",
"spec: tracing: sampling: 100",
"oc login --username=<NAMEOFUSER> https://<HOSTNAME>:6443",
"oc get route -n istio-system jaeger -o jsonpath='{.spec.host}'",
"apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: kiali-monitoring-rbac roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: cluster-monitoring-view subjects: - kind: ServiceAccount name: kiali-service-account namespace: istio-system",
"apiVersion: kiali.io/v1alpha1 kind: Kiali metadata: name: kiali-user-workload-monitoring namespace: istio-system spec: external_services: prometheus: auth: type: bearer use_kiali_token: true query_scope: mesh_id: \"basic-istio-system\" thanos_proxy: enabled: true url: https://thanos-querier.openshift-monitoring.svc.cluster.local:9091",
"apiVersion: kiali.io/v1alpha1 kind: Kiali metadata: name: kiali-user-workload-monitoring namespace: istio-system spec: external_services: istio: config_map_name: istio-<smcp-name> istio_sidecar_injector_config_map_name: istio-sidecar-injector-<smcp-name> istiod_deployment_name: istiod-<smcp-name> url_service_version: 'http://istiod-<smcp-name>.istio-system:15014/version' prometheus: auth: token: secret:thanos-querier-web-token:token type: bearer use_kiali_token: false query_scope: mesh_id: \"basic-istio-system\" thanos_proxy: enabled: true url: https://thanos-querier.openshift-monitoring.svc.cluster.local:9091 version: v1.65",
"apiVersion: maistra.io/v2 kind: ServiceMeshControlPlane metadata: name: basic namespace: istio-system spec: addons: prometheus: enabled: false 1 grafana: enabled: false 2 kiali: name: kiali-user-workload-monitoring meshConfig: extensionProviders: - name: prometheus prometheus: {}",
"apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: user-workload-access namespace: istio-system 1 spec: ingress: - from: - namespaceSelector: matchLabels: network.openshift.io/policy-group: monitoring podSelector: {} policyTypes: - Ingress",
"apiVersion: telemetry.istio.io/v1alpha1 kind: Telemetry metadata: name: enable-prometheus-metrics namespace: istio-system 1 spec: selector: 2 matchLabels: app: bookinfo metrics: - providers: - name: prometheus",
"apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: name: istiod-monitor namespace: istio-system 1 spec: targetLabels: - app selector: matchLabels: istio: pilot endpoints: - port: http-monitoring interval: 30s relabelings: - action: replace replacement: \"basic-istio-system\" 2 targetLabel: mesh_id",
"apiVersion: monitoring.coreos.com/v1 kind: PodMonitor metadata: name: istio-proxies-monitor namespace: istio-system 1 spec: selector: matchExpressions: - key: istio-prometheus-ignore operator: DoesNotExist podMetricsEndpoints: - path: /stats/prometheus interval: 30s relabelings: - action: keep sourceLabels: [__meta_kubernetes_pod_container_name] regex: \"istio-proxy\" - action: keep sourceLabels: [__meta_kubernetes_pod_annotationpresent_prometheus_io_scrape] - action: replace regex: (\\d+);(([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4}) replacement: '[USD2]:USD1' sourceLabels: [__meta_kubernetes_pod_annotation_prometheus_io_port, __meta_kubernetes_pod_ip] targetLabel: __address__ - action: replace regex: (\\d+);((([0-9]+?)(\\.|USD)){4}) replacement: USD2:USD1 sourceLabels: [__meta_kubernetes_pod_annotation_prometheus_io_port, __meta_kubernetes_pod_ip] targetLabel: __address__ - action: labeldrop regex: \"__meta_kubernetes_pod_label_(.+)\" - sourceLabels: [__meta_kubernetes_namespace] action: replace targetLabel: namespace - sourceLabels: [__meta_kubernetes_pod_name] action: replace targetLabel: pod_name - action: replace replacement: \"basic-istio-system\" 2 targetLabel: mesh_id",
"apiVersion: maistra.io/v2 kind: ServiceMeshControlPlane metadata: name: basic namespace: istio-system spec: version: v2.6 proxy: runtime: container: resources: requests: cpu: 600m memory: 50Mi limits: {} runtime: components: pilot: container: resources: requests: cpu: 1000m memory: 1.6Gi limits: {} kiali: container: resources: limits: cpu: \"90m\" memory: \"245Mi\" requests: cpu: \"30m\" memory: \"108Mi\" global.oauthproxy: container: resources: requests: cpu: \"101m\" memory: \"256Mi\" limits: cpu: \"201m\" memory: \"512Mi\"",
"apiVersion: maistra.io/v2 kind: ServiceMeshControlPlane metadata: name: basic spec: version: v2.6 tracing: sampling: 100 type: Jaeger addons: jaeger: name: MyJaeger install: storage: type: Elasticsearch ingress: enabled: true runtime: components: tracing.jaeger.elasticsearch: # only supports resources and image name container: resources: {}",
"oc get smcp basic -o yaml",
"apiVersion: maistra.io/v2 kind: ServiceMeshControlPlane metadata: name: red-mesh namespace: red-mesh-system spec: version: v2.6 runtime: defaults: container: imagePullPolicy: Always gateways: additionalEgress: egress-green-mesh: enabled: true requestedNetworkView: - green-network service: metadata: labels: federation.maistra.io/egress-for: egress-green-mesh ports: - port: 15443 name: tls - port: 8188 name: http-discovery #note HTTP here egress-blue-mesh: enabled: true requestedNetworkView: - blue-network service: metadata: labels: federation.maistra.io/egress-for: egress-blue-mesh ports: - port: 15443 name: tls - port: 8188 name: http-discovery #note HTTP here additionalIngress: ingress-green-mesh: enabled: true service: type: LoadBalancer metadata: labels: federation.maistra.io/ingress-for: ingress-green-mesh ports: - port: 15443 name: tls - port: 8188 name: https-discovery #note HTTPS here ingress-blue-mesh: enabled: true service: type: LoadBalancer metadata: labels: federation.maistra.io/ingress-for: ingress-blue-mesh ports: - port: 15443 name: tls - port: 8188 name: https-discovery #note HTTPS here security: trust: domain: red-mesh.local",
"spec: cluster: name:",
"spec: cluster: network:",
"spec: gateways: additionalEgress: <egress_name>:",
"spec: gateways: additionalEgress: <egress_name>: enabled:",
"spec: gateways: additionalEgress: <egress_name>: requestedNetworkView:",
"spec: gateways: additionalEgress: <egress_name>: service: metadata: labels: federation.maistra.io/egress-for:",
"spec: gateways: additionalEgress: <egress_name>: service: ports:",
"spec: gateways: additionalIngress:",
"spec: gateways: additionalIgress: <ingress_name>: enabled:",
"spec: gateways: additionalIngress: <ingress_name>: service: type:",
"spec: gateways: additionalIngress: <ingress_name>: service: type:",
"spec: gateways: additionalIngress: <ingress_name>: service: metadata: labels: federation.maistra.io/ingress-for:",
"spec: gateways: additionalIngress: <ingress_name>: service: ports:",
"spec: gateways: additionalIngress: <ingress_name>: service: ports: nodePort:",
"apiVersion: maistra.io/v2 kind: ServiceMeshControlPlane metadata: name: green-mesh namespace: green-mesh-system spec: gateways: additionalIngress: ingress-green-mesh: enabled: true service: type: NodePort metadata: labels: federation.maistra.io/ingress-for: ingress-green-mesh ports: - port: 15443 nodePort: 30510 name: tls - port: 8188 nodePort: 32359 name: https-discovery",
"kind: ServiceMeshControlPlane metadata: name: red-mesh namespace: red-mesh-system spec: security: trust: domain: red-mesh.local",
"spec: security: trust: domain:",
"oc login --username=<NAMEOFUSER> https://<HOSTNAME>:6443",
"oc project red-mesh-system",
"oc edit -n red-mesh-system smcp red-mesh",
"oc get smcp -n red-mesh-system",
"NAME READY STATUS PROFILES VERSION AGE red-mesh 10/10 ComponentsReady [\"default\"] 2.1.0 4m25s",
"kind: ServiceMeshPeer apiVersion: federation.maistra.io/v1 metadata: name: green-mesh namespace: red-mesh-system spec: remote: addresses: - ingress-red-mesh.green-mesh-system.apps.domain.com gateways: ingress: name: ingress-green-mesh egress: name: egress-green-mesh security: trustDomain: green-mesh.local clientID: green-mesh.local/ns/green-mesh-system/sa/egress-red-mesh-service-account certificateChain: kind: ConfigMap name: green-mesh-ca-root-cert",
"metadata: name:",
"metadata: namespace:",
"spec: remote: addresses:",
"spec: remote: discoveryPort:",
"spec: remote: servicePort:",
"spec: gateways: ingress: name:",
"spec: gateways: egress: name:",
"spec: security: trustDomain:",
"spec: security: clientID:",
"spec: security: certificateChain: kind: ConfigMap name:",
"oc login --username=<NAMEOFUSER> <API token> https://<HOSTNAME>:6443",
"oc project red-mesh-system",
"kind: ServiceMeshPeer apiVersion: federation.maistra.io/v1 metadata: name: green-mesh namespace: red-mesh-system spec: remote: addresses: - ingress-red-mesh.green-mesh-system.apps.domain.com gateways: ingress: name: ingress-green-mesh egress: name: egress-green-mesh security: trustDomain: green-mesh.local clientID: green-mesh.local/ns/green-mesh-system/sa/egress-red-mesh-service-account certificateChain: kind: ConfigMap name: green-mesh-ca-root-cert",
"oc create -n red-mesh-system -f servicemeshpeer.yaml",
"oc -n red-mesh-system get servicemeshpeer green-mesh -o yaml",
"status: discoveryStatus: active: - pod: istiod-red-mesh-b65457658-9wq5j remotes: - connected: true lastConnected: \"2021-10-05T13:02:25Z\" lastFullSync: \"2021-10-05T13:02:25Z\" source: 10.128.2.149 watch: connected: true lastConnected: \"2021-10-05T13:02:55Z\" lastDisconnectStatus: 503 Service Unavailable lastFullSync: \"2021-10-05T13:05:43Z\"",
"kind: ExportedServiceSet apiVersion: federation.maistra.io/v1 metadata: name: green-mesh namespace: red-mesh-system spec: exportRules: # export ratings.mesh-x-bookinfo as ratings.bookinfo - type: NameSelector nameSelector: namespace: red-mesh-bookinfo name: red-ratings alias: namespace: bookinfo name: ratings # export any service in red-mesh-bookinfo namespace with label export-service=true - type: LabelSelector labelSelector: namespace: red-mesh-bookinfo selector: matchLabels: export-service: \"true\" aliases: # export all matching services as if they were in the bookinfo namespace - namespace: \"*\" name: \"*\" alias: namespace: bookinfo",
"metadata: name:",
"metadata: namespace:",
"spec: exportRules: - type:",
"spec: exportRules: - type: NameSelector nameSelector: namespace: name:",
"spec: exportRules: - type: NameSelector nameSelector: alias: namespace: name:",
"spec: exportRules: - type: LabelSelector labelSelector: namespace: <exportingMesh> selector: matchLabels: <labelKey>: <labelValue>",
"spec: exportRules: - type: LabelSelector labelSelector: namespace: <exportingMesh> selector: matchLabels: <labelKey>: <labelValue> aliases: - namespace: name: alias: namespace: name:",
"kind: ExportedServiceSet apiVersion: federation.maistra.io/v1 metadata: name: blue-mesh namespace: red-mesh-system spec: exportRules: - type: NameSelector nameSelector: namespace: \"*\" name: ratings",
"kind: ExportedServiceSet apiVersion: federation.maistra.io/v1 metadata: name: green-mesh namespace: red-mesh-system spec: exportRules: - type: NameSelector nameSelector: namespace: west-data-center name: \"*\"",
"oc login --username=<NAMEOFUSER> <API token> https://<HOSTNAME>:6443",
"oc project red-mesh-system",
"apiVersion: federation.maistra.io/v1 kind: ExportedServiceSet metadata: name: green-mesh namespace: red-mesh-system spec: exportRules: - type: NameSelector nameSelector: namespace: red-mesh-bookinfo name: ratings alias: namespace: bookinfo name: red-ratings - type: NameSelector nameSelector: namespace: red-mesh-bookinfo name: reviews",
"oc create -n <ControlPlaneNamespace> -f <ExportedServiceSet.yaml>",
"oc create -n red-mesh-system -f export-to-green-mesh.yaml",
"oc get exportedserviceset <PeerMeshExportedTo> -o yaml",
"oc -n red-mesh-system get exportedserviceset green-mesh -o yaml",
"status: exportedServices: - exportedName: red-ratings.bookinfo.svc.green-mesh-exports.local localService: hostname: ratings.red-mesh-bookinfo.svc.cluster.local name: ratings namespace: red-mesh-bookinfo - exportedName: reviews.red-mesh-bookinfo.svc.green-mesh-exports.local localService: hostname: reviews.red-mesh-bookinfo.svc.cluster.local name: reviews namespace: red-mesh-bookinfo",
"kind: ImportedServiceSet apiVersion: federation.maistra.io/v1 metadata: name: red-mesh #name of mesh that exported the service namespace: green-mesh-system #mesh namespace that service is being imported into spec: importRules: # first matching rule is used # import ratings.bookinfo as ratings.bookinfo - type: NameSelector importAsLocal: false nameSelector: namespace: bookinfo name: ratings alias: # service will be imported as ratings.bookinfo.svc.red-mesh-imports.local namespace: bookinfo name: ratings",
"metadata: name:",
"metadata: namespace:",
"spec: importRules: - type:",
"spec: importRules: - type: NameSelector nameSelector: namespace: name:",
"spec: importRules: - type: NameSelector importAsLocal:",
"spec: importRules: - type: NameSelector nameSelector: namespace: name: alias: namespace: name:",
"kind: ImportedServiceSet apiVersion: federation.maistra.io/v1 metadata: name: red-mesh namespace: blue-mesh-system spec: importRules: - type: NameSelector importAsLocal: false nameSelector: namespace: bookinfo name: ratings",
"kind: ImportedServiceSet apiVersion: federation.maistra.io/v1 metadata: name: red-mesh namespace: green-mesh-system spec: importRules: - type: NameSelector importAsLocal: false nameSelector: namespace: west-data-center name: \"*\"",
"oc login --username=<NAMEOFUSER> <API token> https://<HOSTNAME>:6443",
"oc project green-mesh-system",
"kind: ImportedServiceSet apiVersion: federation.maistra.io/v1 metadata: name: red-mesh namespace: green-mesh-system spec: importRules: - type: NameSelector importAsLocal: false nameSelector: namespace: bookinfo name: red-ratings alias: namespace: bookinfo name: ratings",
"oc create -n <ControlPlaneNamespace> -f <ImportedServiceSet.yaml>",
"oc create -n green-mesh-system -f import-from-red-mesh.yaml",
"oc get importedserviceset <PeerMeshImportedInto> -o yaml",
"oc -n green-mesh-system get importedserviceset/red-mesh -o yaml",
"status: importedServices: - exportedName: red-ratings.bookinfo.svc.green-mesh-exports.local localService: hostname: ratings.bookinfo.svc.red-mesh-imports.local name: ratings namespace: bookinfo - exportedName: reviews.red-mesh-bookinfo.svc.green-mesh-exports.local localService: hostname: \"\" name: \"\" namespace: \"\"",
"kind: ImportedServiceSet apiVersion: federation.maistra.io/v1 metadata: name: red-mesh #name of mesh that exported the service namespace: green-mesh-system #mesh namespace that service is being imported into spec: importRules: # first matching rule is used # import ratings.bookinfo as ratings.bookinfo - type: NameSelector importAsLocal: true nameSelector: namespace: bookinfo name: ratings alias: # service will be imported as ratings.bookinfo.svc.red-mesh-imports.local namespace: bookinfo name: ratings #Locality within which imported services should be associated. locality: region: us-west",
"oc login --username=<NAMEOFUSER> <API token> https://<HOSTNAME>:6443",
"oc project <smcp-system>",
"oc project green-mesh-system",
"oc edit -n <smcp-system> -f <ImportedServiceSet.yaml>",
"oc edit -n green-mesh-system -f import-from-red-mesh.yaml",
"oc login --username=<NAMEOFUSER> <API token> https://<HOSTNAME>:6443",
"oc project <smcp-system>",
"oc project green-mesh-system",
"apiVersion: networking.istio.io/v1beta1 kind: DestinationRule metadata: name: default-failover namespace: bookinfo spec: host: \"ratings.bookinfo.svc.cluster.local\" trafficPolicy: loadBalancer: localityLbSetting: enabled: true failover: - from: us-east to: us-west outlierDetection: consecutive5xxErrors: 3 interval: 10s baseEjectionTime: 1m",
"oc create -n <application namespace> -f <DestinationRule.yaml>",
"oc create -n bookinfo -f green-mesh-us-west-DestinationRule.yaml",
"apiVersion: extensions.istio.io/v1alpha1 kind: WasmPlugin metadata: name: openid-connect namespace: istio-ingress spec: selector: matchLabels: istio: ingressgateway url: file:///opt/filters/openid.wasm sha256: 1ef0c9a92b0420cf25f7fe5d481b231464bc88f486ca3b9c83ed5cc21d2f6210 phase: AUTHN pluginConfig: openid_server: authn openid_realm: ingress",
"apiVersion: extensions.istio.io/v1alpha1 kind: WasmPlugin metadata: name: openid-connect namespace: istio-system spec: selector: matchLabels: istio: ingressgateway url: oci://private-registry:5000/openid-connect/openid:latest imagePullPolicy: IfNotPresent imagePullSecret: private-registry-pull-secret phase: AUTHN pluginConfig: openid_server: authn openid_realm: ingress",
"apiVersion: extensions.istio.io/v1alpha1 kind: WasmPlugin metadata: name: openid-connect namespace: istio-system spec: selector: matchLabels: istio: ingressgateway url: oci://private-registry:5000/openid-connect/openid:latest imagePullPolicy: IfNotPresent imagePullSecret: private-registry-pull-secret phase: AUTHN pluginConfig: openid_server: authn openid_realm: ingress",
"oc apply -f plugin.yaml",
"schemaVersion: 1 name: <your-extension> description: <description> version: 1.0.0 phase: PreAuthZ priority: 100 module: extension.wasm",
"apiVersion: maistra.io/v1 kind: ServiceMeshExtension metadata: name: header-append namespace: istio-system spec: workloadSelector: labels: app: httpbin config: first-header: some-value another-header: another-value image: quay.io/maistra-dev/header-append-filter:2.1 phase: PostAuthZ priority: 100",
"oc apply -f <extension>.yaml",
"apiVersion: maistra.io/v1 kind: ServiceMeshExtension metadata: name: header-append namespace: istio-system spec: workloadSelector: labels: app: httpbin config: first-header: some-value another-header: another-value image: quay.io/maistra-dev/header-append-filter:2.2 phase: PostAuthZ priority: 100",
"apiVersion: extensions.istio.io/v1alpha1 kind: WasmPlugin metadata: name: header-append namespace: istio-system spec: selector: matchLabels: app: httpbin url: oci://quay.io/maistra-dev/header-append-filter:2.2 phase: STATS pluginConfig: first-header: some-value another-header: another-value",
"cat <<EOM | oc apply -f - apiVersion: kiali.io/v1alpha1 kind: OSSMConsole metadata: namespace: openshift-operators name: ossmconsole EOM",
"delete ossmconsoles <custom_resource_name> -n <custom_resource_namespace>",
"for r in USD(oc get ossmconsoles --ignore-not-found=true --all-namespaces -o custom-columns=NS:.metadata.namespace,N:.metadata.name --no-headers | sed 's/ */:/g'); do oc delete ossmconsoles -n USD(echo USDr|cut -d: -f1) USD(echo USDr|cut -d: -f2); done",
"apiVersion: extensions.istio.io/v1alpha1 kind: WasmPlugin metadata: name: <threescale_wasm_plugin_name> namespace: <bookinfo> 1 spec: selector: 2 labels: app: <product_page> pluginConfig: <yaml_configuration> url: oci://registry.redhat.io/3scale-amp2/3scale-auth-wasm-rhel8:0.0.3 phase: AUTHZ priority: 100",
"oc apply -f threescale-wasm-auth-bookinfo.yaml",
"apiVersion: networking.istio.io/v1beta1 kind: ServiceEntry metadata: name: service-entry-threescale-saas-backend spec: hosts: - su1.3scale.net ports: - number: 443 name: https protocol: HTTPS location: MESH_EXTERNAL resolution: DNS",
"apiVersion: networking.istio.io/v1beta1 kind: DestinationRule metadata: name: destination-rule-threescale-saas-backend spec: host: su1.3scale.net trafficPolicy: tls: mode: SIMPLE sni: su1.3scale.net",
"oc apply -f service-entry-threescale-saas-backend.yml",
"oc apply -f destination-rule-threescale-saas-backend.yml",
"apiVersion: networking.istio.io/v1beta1 kind: ServiceEntry metadata: name: service-entry-threescale-saas-system spec: hosts: - multitenant.3scale.net ports: - number: 443 name: https protocol: HTTPS location: MESH_EXTERNAL resolution: DNS",
"apiVersion: networking.istio.io/v1beta1 kind: DestinationRule metadata: name: destination-rule-threescale-saas-system spec: host: multitenant.3scale.net trafficPolicy: tls: mode: SIMPLE sni: multitenant.3scale.net",
"oc apply -f service-entry-threescale-saas-system.yml",
"oc apply -f <destination-rule-threescale-saas-system.yml>",
"apiVersion: extensions.istio.io/v1alpha1 kind: WasmPlugin metadata: name: <threescale_wasm_plugin_name> namespace: <bookinfo> spec: pluginConfig: api: v1",
"apiVersion: extensions.istio.io/v1alpha1 kind: WasmPlugin metadata: name: <threescale_wasm_plugin_name> spec: pluginConfig: system: name: <saas_porta> upstream: <object> token: <my_account_token> ttl: 300",
"apiVersion: maistra.io/v1 upstream: name: outbound|443||multitenant.3scale.net url: \"https://myaccount-admin.3scale.net/\" timeout: 5000",
"apiVersion: extensions.istio.io/v1alpha1 kind: WasmPlugin metadata: name: <threescale_wasm_plugin_name> spec: pluginConfig: backend: name: backend upstream: <object>",
"apiVersion: extensions.istio.io/v1alpha1 kind: WasmPlugin metadata: name: <threescale_wasm_plugin_name> spec: pluginConfig: services: - id: \"2555417834789\" token: service_token authorities: - \"*.app\" - 0.0.0.0 - \"0.0.0.0:8443\" credentials: <object> mapping_rules: <object>",
"apiVersion: extensions.istio.io/v1alpha1 kind: WasmPlugin metadata: name: <threescale_wasm_plugin_name> spec: pluginConfig: services: - credentials: user_key: <array_of_lookup_queries> app_id: <array_of_lookup_queries> app_key: <array_of_lookup_queries>",
"apiVersion: extensions.istio.io/v1alpha1 kind: WasmPlugin metadata: name: <threescale_wasm_plugin_name> spec: pluginConfig: services: - credentials: user_key: - <source_type>: <object> - <source_type>: <object> app_id: - <source_type>: <object> app_key: - <source_type>: <object>",
"apiVersion: extensions.istio.io/v1alpha1 kind: WasmPlugin metadata: name: <threescale_wasm_plugin_name> spec: pluginConfig: mapping_rules: - method: GET pattern: / usages: - name: hits delta: 1 - method: GET pattern: /products/ usages: - name: products delta: 1 - method: ANY pattern: /products/{id}/sold usages: - name: sales delta: 1 - name: products delta: 1",
"apiVersion: extensions.istio.io/v1alpha1 kind: WasmPlugin metadata: name: <threescale_wasm_plugin_name> spec: services: credentials: user_key: - query_string: keys: - <user_key> - header: keys: - <user_key>",
"apiVersion: extensions.istio.io/v1alpha1 kind: WasmPlugin metadata: name: <threescale_wasm_plugin_name> spec: services: credentials: app_id: - query_string: keys: - <app_id> - header: keys: - <app_id> app_key: - query_string: keys: - <app_key> - header: keys: - <app_key>",
"aladdin:opensesame: Authorization: Basic YWxhZGRpbjpvcGVuc2VzYW1l",
"apiVersion: extensions.istio.io/v1alpha1 kind: WasmPlugin metadata: name: <threescale_wasm_plugin_name> spec: services: credentials: app_id: - header: keys: - authorization ops: - split: separator: \" \" max: 2 - length: min: 2 - drop: head: 1 - base64_urlsafe - split: max: 2 app_key: - header: keys: - app_key",
"apiVersion: extensions.istio.io/v1alpha1 kind: WasmPlugin metadata: name: <threescale_wasm_plugin_name> spec: services: credentials: app_id: - header: keys: - authorization ops: - split: separator: \" \" max: 2 - length: min: 2 - reverse - glob: - Basic - drop: tail: 1 - base64_urlsafe - split: max: 2 - test: if: length: min: 2 then: - strlen: max: 63 - or: - strlen: min: 1 - drop: tail: 1 - assert: - and: - reverse - or: - strlen: min: 8 - glob: - aladdin - admin",
"apiVersion: security.istio.io/v1beta1 kind: RequestAuthentication metadata: name: jwt-example namespace: bookinfo spec: selector: matchLabels: app: productpage jwtRules: - issuer: >- http://keycloak-keycloak.34.242.107.254.nip.io/auth/realms/3scale-keycloak jwksUri: >- http://keycloak-keycloak.34.242.107.254.nip.io/auth/realms/3scale-keycloak/protocol/openid-connect/certs",
"apiVersion: extensions.istio.io/v1alpha1 kind: WasmPlugin metadata: name: <threescale_wasm_plugin_name> spec: services: credentials: app_id: - filter: path: - envoy.filters.http.jwt_authn - \"0\" keys: - azp - aud ops: - take: head: 1",
"apiVersion: extensions.istio.io/v1alpha1 kind: WasmPlugin metadata: name: <threescale_wasm_plugin_name> spec: services: credentials: app_id: - header: keys: - x-jwt-payload ops: - base64_urlsafe - json: - keys: - azp - aud - take: head: 1 ,,,",
"apiVersion: extensions.istio.io/v1alpha1 kind: WasmPlugin metadata: name: <threescale_wasm_plugin_name> spec: url: oci://registry.redhat.io/3scale-amp2/3scale-auth-wasm-rhel8:0.0.3 imagePullSecret: <optional_pull_secret_resource> phase: AUTHZ priority: 100 selector: labels: app: <product_page> pluginConfig: api: v1 system: name: <system_name> upstream: name: outbound|443||multitenant.3scale.net url: https://istiodevel-admin.3scale.net/ timeout: 5000 token: <token> backend: name: <backend_name> upstream: name: outbound|443||su1.3scale.net url: https://su1.3scale.net/ timeout: 5000 extensions: - no_body services: - id: '2555417834780' authorities: - \"*\" credentials: user_key: - query_string: keys: - <user_key> - header: keys: - <user_key> app_id: - query_string: keys: - <app_id> - header: keys: - <app_id> app_key: - query_string: keys: - <app_key> - header: keys: - <app_key>",
"apiVersion: \"config.istio.io/v1alpha2\" kind: handler metadata: name: threescale spec: adapter: threescale params: system_url: \"https://<organization>-admin.3scale.net/\" access_token: \"<ACCESS_TOKEN>\" connection: address: \"threescale-istio-adapter:3333\"",
"apiVersion: \"config.istio.io/v1alpha2\" kind: rule metadata: name: threescale spec: match: destination.labels[\"service-mesh.3scale.net\"] == \"true\" actions: - handler: threescale.handler instances: - threescale-authorization.instance",
"3scale-config-gen --name=admin-credentials --url=\"https://<organization>-admin.3scale.net:443\" --token=\"[redacted]\"",
"3scale-config-gen --url=\"https://<organization>-admin.3scale.net\" --name=\"my-unique-id\" --service=\"123456789\" --token=\"[redacted]\"",
"export NS=\"istio-system\" URL=\"https://replaceme-admin.3scale.net:443\" NAME=\"name\" TOKEN=\"token\" exec -n USD{NS} USD(oc get po -n USD{NS} -o jsonpath='{.items[?(@.metadata.labels.app==\"3scale-istio-adapter\")].metadata.name}') -it -- ./3scale-config-gen --url USD{URL} --name USD{NAME} --token USD{TOKEN} -n USD{NS}",
"export CREDENTIALS_NAME=\"replace-me\" export SERVICE_ID=\"replace-me\" export DEPLOYMENT=\"replace-me\" patch=\"USD(oc get deployment \"USD{DEPLOYMENT}\" patch=\"USD(oc get deployment \"USD{DEPLOYMENT}\" --template='{\"spec\":{\"template\":{\"metadata\":{\"labels\":{ {{ range USDk,USDv := .spec.template.metadata.labels }}\"{{ USDk }}\":\"{{ USDv }}\",{{ end }}\"service-mesh.3scale.net/service-id\":\"'\"USD{SERVICE_ID}\"'\",\"service-mesh.3scale.net/credentials\":\"'\"USD{CREDENTIALS_NAME}\"'\"}}}}}' )\" patch deployment \"USD{DEPLOYMENT}\" --patch ''\"USD{patch}\"''",
"apiVersion: \"config.istio.io/v1alpha2\" kind: instance metadata: name: threescale-authorization namespace: istio-system spec: template: authorization params: subject: user: request.query_params[\"user_key\"] | request.headers[\"user-key\"] | \"\" action: path: request.url_path method: request.method | \"get\"",
"apiVersion: \"config.istio.io/v1alpha2\" kind: instance metadata: name: threescale-authorization namespace: istio-system spec: template: authorization params: subject: app_id: request.query_params[\"app_id\"] | request.headers[\"app-id\"] | \"\" app_key: request.query_params[\"app_key\"] | request.headers[\"app-key\"] | \"\" action: path: request.url_path method: request.method | \"get\"",
"apiVersion: \"config.istio.io/v1alpha2\" kind: instance metadata: name: threescale-authorization spec: template: threescale-authorization params: subject: properties: app_key: request.query_params[\"app_key\"] | request.headers[\"app-key\"] | \"\" client_id: request.auth.claims[\"azp\"] | \"\" action: path: request.url_path method: request.method | \"get\" service: destination.labels[\"service-mesh.3scale.net/service-id\"] | \"\"",
"apiVersion: security.istio.io/v1beta1 kind: RequestAuthentication metadata: name: jwt-example namespace: bookinfo spec: selector: matchLabels: app: productpage jwtRules: - issuer: >- http://keycloak-keycloak.34.242.107.254.nip.io/auth/realms/3scale-keycloak jwksUri: >- http://keycloak-keycloak.34.242.107.254.nip.io/auth/realms/3scale-keycloak/protocol/openid-connect/certs",
"apiVersion: \"config.istio.io/v1alpha2\" kind: instance metadata: name: threescale-authorization spec: template: authorization params: subject: user: request.query_params[\"user_key\"] | request.headers[\"user-key\"] | properties: app_id: request.query_params[\"app_id\"] | request.headers[\"app-id\"] | \"\" app_key: request.query_params[\"app_key\"] | request.headers[\"app-key\"] | \"\" client_id: request.auth.claims[\"azp\"] | \"\" action: path: request.url_path method: request.method | \"get\" service: destination.labels[\"service-mesh.3scale.net/service-id\"] | \"\"",
"oc get pods -n istio-system",
"oc logs istio-system",
"oc get pods -n openshift-operators",
"NAME READY STATUS RESTARTS AGE istio-operator-bb49787db-zgr87 1/1 Running 0 15s jaeger-operator-7d5c4f57d8-9xphf 1/1 Running 0 2m42s kiali-operator-f9c8d84f4-7xh2v 1/1 Running 0 64s",
"oc get pods -n openshift-operators-redhat",
"NAME READY STATUS RESTARTS AGE elasticsearch-operator-d4f59b968-796vq 1/1 Running 0 15s",
"oc logs -n openshift-operators <podName>",
"oc logs -n openshift-operators istio-operator-bb49787db-zgr87",
"oc get pods -n istio-system",
"NAME READY STATUS RESTARTS AGE grafana-6776785cfc-6fz7t 2/2 Running 0 102s istio-egressgateway-5f49dd99-l9ppq 1/1 Running 0 103s istio-ingressgateway-6dc885c48-jjd8r 1/1 Running 0 103s istiod-basic-6c9cc55998-wg4zq 1/1 Running 0 2m14s jaeger-6865d5d8bf-zrfss 2/2 Running 0 100s kiali-579799fbb7-8mwc8 1/1 Running 0 46s prometheus-5c579dfb-6qhjk 2/2 Running 0 115s",
"oc get smcp -n istio-system",
"NAME READY STATUS PROFILES VERSION AGE basic 10/10 ComponentsReady [\"default\"] 2.1.3 4m2s",
"NAME READY STATUS TEMPLATE VERSION AGE basic-install 10/10 UpdateSuccessful default v1.1 3d16h",
"oc describe smcp <smcp-name> -n <controlplane-namespace>",
"oc describe smcp basic -n istio-system",
"oc get jaeger -n istio-system",
"NAME STATUS VERSION STRATEGY STORAGE AGE jaeger Running 1.30.0 allinone memory 15m",
"oc get kiali -n istio-system",
"NAME AGE kiali 15m",
"oc login --username=<NAMEOFUSER> https://<HOSTNAME>:6443",
"oc get route -n istio-system jaeger -o jsonpath='{.spec.host}'",
"oc login --username=<NAMEOFUSER> https://<HOSTNAME>:6443",
"oc project istio-system",
"oc edit smcp <smcp_name>",
"apiVersion: maistra.io/v2 kind: ServiceMeshControlPlane metadata: name: basic namespace: istio-system spec: proxy: accessLogging: file: name: /dev/stdout #file name",
"oc adm must-gather --image=registry.redhat.io/openshift-service-mesh/istio-must-gather-rhel8:2.6",
"oc adm must-gather --image=registry.redhat.io/openshift-service-mesh/istio-must-gather-rhel8:2.6 gather <namespace>",
"oc get clusterversion -o jsonpath='{.items[].spec.clusterID}{\"\\n\"}'",
"apiVersion: maistra.io/v2 kind: ServiceMeshControlPlane metadata: name: basic spec: version: v2.6 proxy: runtime: container: resources: requests: cpu: 100m memory: 128Mi limits: cpu: 500m memory: 128Mi tracing: type: Jaeger gateways: ingress: # istio-ingressgateway service: type: ClusterIP ports: - name: status-port port: 15020 - name: http2 port: 80 targetPort: 8080 - name: https port: 443 targetPort: 8443 meshExpansionPorts: [] egress: # istio-egressgateway service: type: ClusterIP ports: - name: status-port port: 15020 - name: http2 port: 80 targetPort: 8080 - name: https port: 443 targetPort: 8443 additionalIngress: some-other-ingress-gateway: {} additionalEgress: some-other-egress-gateway: {} policy: type: Mixer mixer: # only applies if policy.type: Mixer enableChecks: true failOpen: false telemetry: type: Istiod # or Mixer mixer: # only applies if telemetry.type: Mixer, for v1 telemetry sessionAffinity: false batching: maxEntries: 100 maxTime: 1s adapters: kubernetesenv: true stdio: enabled: true outputAsJSON: true addons: grafana: enabled: true install: config: env: {} envSecrets: {} persistence: enabled: true storageClassName: \"\" accessMode: ReadWriteOnce capacity: requests: storage: 5Gi service: ingress: contextPath: /grafana tls: termination: reencrypt kiali: name: kiali enabled: true install: # install kiali CR if not present dashboard: viewOnly: false enableGrafana: true enableTracing: true enablePrometheus: true service: ingress: contextPath: /kiali jaeger: name: jaeger install: storage: type: Elasticsearch # or Memory memory: maxTraces: 100000 elasticsearch: nodeCount: 3 storage: {} redundancyPolicy: SingleRedundancy indexCleaner: {} ingress: {} # jaeger ingress configuration runtime: components: pilot: deployment: replicas: 2 pod: affinity: {} container: resources: requests: cpu: 100m memory: 128Mi limits: cpu: 500m memory: 128Mi grafana: deployment: {} pod: {} kiali: deployment: {} pod: {}",
"apiVersion: maistra.io/v2 kind: ServiceMeshControlPlane metadata: name: basic spec: general: logging: componentLevels: {} # misc: error logAsJSON: false validationMessages: true",
"logging:",
"logging: componentLevels:",
"logging: logAsJSON:",
"validationMessages:",
"apiVersion: maistra.io/v2 kind: ServiceMeshControlPlane metadata: name: basic spec: profiles: - YourProfileName",
"apiVersion: maistra.io/v2 kind: ServiceMeshControlPlane metadata: name: basic spec: version: v2.6 tracing: sampling: 100 type: Jaeger",
"tracing: sampling:",
"tracing: type:",
"apiVersion: maistra.io/v2 kind: ServiceMeshControlPlane metadata: name: basic spec: addons: 3Scale: enabled: false PARAM_THREESCALE_LISTEN_ADDR: 3333 PARAM_THREESCALE_LOG_LEVEL: info PARAM_THREESCALE_LOG_JSON: true PARAM_THREESCALE_LOG_GRPC: false PARAM_THREESCALE_REPORT_METRICS: true PARAM_THREESCALE_METRICS_PORT: 8080 PARAM_THREESCALE_CACHE_TTL_SECONDS: 300 PARAM_THREESCALE_CACHE_REFRESH_SECONDS: 180 PARAM_THREESCALE_CACHE_ENTRIES_MAX: 1000 PARAM_THREESCALE_CACHE_REFRESH_RETRIES: 1 PARAM_THREESCALE_ALLOW_INSECURE_CONN: false PARAM_THREESCALE_CLIENT_TIMEOUT_SECONDS: 10 PARAM_THREESCALE_GRPC_CONN_MAX_SECONDS: 60 PARAM_USE_CACHED_BACKEND: false PARAM_BACKEND_CACHE_FLUSH_INTERVAL_SECONDS: 15 PARAM_BACKEND_CACHE_POLICY_FAIL_CLOSED: true",
"apiVersion: maistra.io/v2 kind: ServiceMeshControlPlane metadata: name: basic spec: addons: kiali: name: kiali enabled: true install: dashboard: viewOnly: false enableGrafana: true enableTracing: true enablePrometheus: true service: ingress: contextPath: /kiali",
"spec: addons: kiali: name:",
"kiali: enabled:",
"kiali: install:",
"kiali: install: dashboard:",
"kiali: install: dashboard: viewOnly:",
"kiali: install: dashboard: enableGrafana:",
"kiali: install: dashboard: enablePrometheus:",
"kiali: install: dashboard: enableTracing:",
"kiali: install: service:",
"kiali: install: service: metadata:",
"kiali: install: service: metadata: annotations:",
"kiali: install: service: metadata: labels:",
"kiali: install: service: ingress:",
"kiali: install: service: ingress: metadata: annotations:",
"kiali: install: service: ingress: metadata: labels:",
"kiali: install: service: ingress: enabled:",
"kiali: install: service: ingress: contextPath:",
"install: service: ingress: hosts:",
"install: service: ingress: tls:",
"kiali: install: service: nodePort:",
"apiVersion: maistra.io/v2 kind: ServiceMeshControlPlane metadata: name: basic spec: version: v2.6 tracing: sampling: 100 type: Jaeger",
"apiVersion: maistra.io/v2 kind: ServiceMeshControlPlane metadata: name: basic spec: version: v2.6 tracing: sampling: 10000 type: Jaeger addons: jaeger: name: jaeger install: storage: type: Memory",
"apiVersion: maistra.io/v2 kind: ServiceMeshControlPlane metadata: name: basic spec: version: v2.6 tracing: sampling: 10000 type: Jaeger addons: jaeger: name: jaeger #name of Jaeger CR install: storage: type: Elasticsearch ingress: enabled: true runtime: components: tracing.jaeger.elasticsearch: # only supports resources and image name container: resources: {}",
"apiVersion: maistra.io/v2 kind: ServiceMeshControlPlane metadata: name: basic spec: version: v2.6 tracing: sampling: 1000 type: Jaeger addons: jaeger: name: MyJaegerInstance #name of Jaeger CR install: storage: type: Elasticsearch ingress: enabled: true",
"apiVersion: maistra.io/v2 kind: ServiceMeshControlPlane metadata: name: basic spec: version: v2.6 tracing: sampling: 1000 type: Jaeger addons: jaeger: name: MyJaegerInstance #name of Jaeger CR",
"apiVersion: jaegertracing.io/v1 kind: Jaeger spec: ingress: enabled: true openshift: htpasswdFile: /etc/proxy/htpasswd/auth sar: '{\"namespace\": \"istio-system\", \"resource\": \"pods\", \"verb\": \"get\"}' options: {} resources: {} security: oauth-proxy volumes: - name: secret-htpasswd secret: secretName: htpasswd - configMap: defaultMode: 420 items: - key: ca-bundle.crt path: tls-ca-bundle.pem name: trusted-ca-bundle optional: true name: trusted-ca-bundle volumeMounts: - mountPath: /etc/proxy/htpasswd name: secret-htpasswd - mountPath: /etc/pki/ca-trust/extracted/pem/ name: trusted-ca-bundle readOnly: true",
"oc login https://<HOSTNAME>:6443",
"oc project istio-system",
"oc edit -n openshift-distributed-tracing -f jaeger.yaml",
"apiVersion: jaegertracing.io/v1 kind: Jaeger spec: ingress: enabled: true openshift: htpasswdFile: /etc/proxy/htpasswd/auth sar: '{\"namespace\": \"istio-system\", \"resource\": \"pods\", \"verb\": \"get\"}' options: {} resources: {} security: oauth-proxy volumes: - name: secret-htpasswd secret: secretName: htpasswd - configMap: defaultMode: 420 items: - key: ca-bundle.crt path: tls-ca-bundle.pem name: trusted-ca-bundle optional: true name: trusted-ca-bundle volumeMounts: - mountPath: /etc/proxy/htpasswd name: secret-htpasswd - mountPath: /etc/pki/ca-trust/extracted/pem/ name: trusted-ca-bundle readOnly: true",
"oc get pods -n openshift-distributed-tracing",
"apiVersion: jaegertracing.io/v1 kind: Jaeger metadata: name: name spec: strategy: <deployment_strategy> allInOne: options: {} resources: {} agent: options: {} resources: {} collector: options: {} resources: {} sampling: options: {} storage: type: options: {} query: options: {} resources: {} ingester: options: {} resources: {} options: {}",
"apiVersion: jaegertracing.io/v1 kind: Jaeger metadata: name: jaeger-all-in-one-inmemory",
"collector: replicas:",
"spec: collector: options: {}",
"options: collector: num-workers:",
"options: collector: queue-size:",
"options: kafka: producer: topic: jaeger-spans",
"options: kafka: producer: brokers: my-cluster-kafka-brokers.kafka:9092",
"options: log-level:",
"options: otlp: enabled: true grpc: host-port: 4317 max-connection-age: 0s max-connection-age-grace: 0s max-message-size: 4194304 tls: enabled: false cert: /path/to/cert.crt cipher-suites: \"TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256\" client-ca: /path/to/cert.ca reload-interval: 0s min-version: 1.2 max-version: 1.3",
"options: otlp: enabled: true http: cors: allowed-headers: [<header-name>[, <header-name>]*] allowed-origins: * host-port: 4318 max-connection-age: 0s max-connection-age-grace: 0s max-message-size: 4194304 read-timeout: 0s read-header-timeout: 2s idle-timeout: 0s tls: enabled: false cert: /path/to/cert.crt cipher-suites: \"TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256\" client-ca: /path/to/cert.ca reload-interval: 0s min-version: 1.2 max-version: 1.3",
"spec: sampling: options: {} default_strategy: service_strategy:",
"default_strategy: type: service_strategy: type:",
"default_strategy: param: service_strategy: param:",
"apiVersion: jaegertracing.io/v1 kind: Jaeger metadata: name: with-sampling spec: sampling: options: default_strategy: type: probabilistic param: 0.5 service_strategies: - service: alpha type: probabilistic param: 0.8 operation_strategies: - operation: op1 type: probabilistic param: 0.2 - operation: op2 type: probabilistic param: 0.4 - service: beta type: ratelimiting param: 5",
"spec: sampling: options: default_strategy: type: probabilistic param: 1",
"spec: storage: type:",
"storage: secretname:",
"storage: options: {}",
"storage: esIndexCleaner: enabled:",
"storage: esIndexCleaner: numberOfDays:",
"storage: esIndexCleaner: schedule:",
"elasticsearch: properties: doNotProvision:",
"elasticsearch: properties: name:",
"elasticsearch: nodeCount:",
"elasticsearch: resources: requests: cpu:",
"elasticsearch: resources: requests: memory:",
"elasticsearch: resources: limits: cpu:",
"elasticsearch: resources: limits: memory:",
"elasticsearch: redundancyPolicy:",
"elasticsearch: useCertManagement:",
"apiVersion: jaegertracing.io/v1 kind: Jaeger metadata: name: simple-prod spec: strategy: production storage: type: elasticsearch elasticsearch: nodeCount: 3 resources: requests: cpu: 1 memory: 16Gi limits: memory: 16Gi",
"apiVersion: jaegertracing.io/v1 kind: Jaeger metadata: name: simple-prod spec: strategy: production storage: type: elasticsearch elasticsearch: nodeCount: 1 storage: 1 storageClassName: gp2 size: 5Gi resources: requests: cpu: 200m memory: 4Gi limits: memory: 4Gi redundancyPolicy: ZeroRedundancy",
"es: server-urls:",
"es: max-doc-count:",
"es: max-num-spans:",
"es: max-span-age:",
"es: sniffer:",
"es: sniffer-tls-enabled:",
"es: timeout:",
"es: username:",
"es: password:",
"es: version:",
"es: num-replicas:",
"es: num-shards:",
"es: create-index-templates:",
"es: index-prefix:",
"es: bulk: actions:",
"es: bulk: flush-interval:",
"es: bulk: size:",
"es: bulk: workers:",
"es: tls: ca:",
"es: tls: cert:",
"es: tls: enabled:",
"es: tls: key:",
"es: tls: server-name:",
"es: token-file:",
"es-archive: bulk: actions:",
"es-archive: bulk: flush-interval:",
"es-archive: bulk: size:",
"es-archive: bulk: workers:",
"es-archive: create-index-templates:",
"es-archive: enabled:",
"es-archive: index-prefix:",
"es-archive: max-doc-count:",
"es-archive: max-num-spans:",
"es-archive: max-span-age:",
"es-archive: num-replicas:",
"es-archive: num-shards:",
"es-archive: password:",
"es-archive: server-urls:",
"es-archive: sniffer:",
"es-archive: sniffer-tls-enabled:",
"es-archive: timeout:",
"es-archive: tls: ca:",
"es-archive: tls: cert:",
"es-archive: tls: enabled:",
"es-archive: tls: key:",
"es-archive: tls: server-name:",
"es-archive: token-file:",
"es-archive: username:",
"es-archive: version:",
"apiVersion: jaegertracing.io/v1 kind: Jaeger metadata: name: simple-prod spec: strategy: production storage: type: elasticsearch options: es: server-urls: https://quickstart-es-http.default.svc:9200 index-prefix: my-prefix tls: ca: /es/certificates/ca.crt secretName: tracing-secret volumeMounts: - name: certificates mountPath: /es/certificates/ readOnly: true volumes: - name: certificates secret: secretName: quickstart-es-http-certs-public",
"apiVersion: jaegertracing.io/v1 kind: Jaeger metadata: name: simple-prod spec: strategy: production storage: type: elasticsearch options: es: server-urls: https://quickstart-es-http.default.svc:9200 1 index-prefix: my-prefix tls: 2 ca: /es/certificates/ca.crt secretName: tracing-secret 3 volumeMounts: 4 - name: certificates mountPath: /es/certificates/ readOnly: true volumes: - name: certificates secret: secretName: quickstart-es-http-certs-public",
"apiVersion: logging.openshift.io/v1 kind: Elasticsearch metadata: annotations: logging.openshift.io/elasticsearch-cert-management: \"true\" logging.openshift.io/elasticsearch-cert.jaeger-custom-es: \"user.jaeger\" logging.openshift.io/elasticsearch-cert.curator-custom-es: \"system.logging.curator\" name: custom-es spec: managementState: Managed nodeSpec: resources: limits: memory: 16Gi requests: cpu: 1 memory: 16Gi nodes: - nodeCount: 3 proxyResources: {} resources: {} roles: - master - client - data storage: {} redundancyPolicy: ZeroRedundancy",
"apiVersion: jaegertracing.io/v1 kind: Jaeger metadata: name: jaeger-prod spec: strategy: production storage: type: elasticsearch elasticsearch: name: custom-es doNotProvision: true useCertManagement: true",
"spec: query: replicas:",
"spec: query: options: {}",
"options: log-level:",
"options: query: base-path:",
"apiVersion: jaegertracing.io/v1 kind: \"Jaeger\" metadata: name: \"my-jaeger\" spec: strategy: allInOne allInOne: options: log-level: debug query: base-path: /jaeger",
"spec: ingester: options: {}",
"options: deadlockInterval:",
"options: kafka: consumer: topic:",
"options: kafka: consumer: brokers:",
"options: log-level:",
"apiVersion: jaegertracing.io/v1 kind: Jaeger metadata: name: simple-streaming spec: strategy: streaming collector: options: kafka: producer: topic: jaeger-spans brokers: my-cluster-kafka-brokers.kafka:9092 ingester: options: kafka: consumer: topic: jaeger-spans brokers: my-cluster-kafka-brokers.kafka:9092 ingester: deadlockInterval: 5 storage: type: elasticsearch options: es: server-urls: http://elasticsearch:9200",
"oc delete smmr -n istio-system default",
"oc get smcp -n istio-system",
"oc delete smcp -n istio-system <name_of_custom_resource>",
"oc -n openshift-operators delete ds -lmaistra-version",
"oc delete clusterrole/istio-admin clusterrole/istio-cni clusterrolebinding/istio-cni clusterrole/ossm-cni clusterrolebinding/ossm-cni",
"oc delete clusterrole istio-view istio-edit",
"oc delete clusterrole jaegers.jaegertracing.io-v1-admin jaegers.jaegertracing.io-v1-crdview jaegers.jaegertracing.io-v1-edit jaegers.jaegertracing.io-v1-view",
"oc get crds -o name | grep '.*\\.istio\\.io' | xargs -r -n 1 oc delete",
"oc get crds -o name | grep '.*\\.maistra\\.io' | xargs -r -n 1 oc delete",
"oc get crds -o name | grep '.*\\.kiali\\.io' | xargs -r -n 1 oc delete",
"oc delete crds jaegers.jaegertracing.io",
"oc delete cm -n openshift-operators -lmaistra-version",
"oc delete sa -n openshift-operators -lmaistra-version",
"oc adm must-gather --image=registry.redhat.io/container-native-virtualization/cnv-must-gather-rhel9:v4.14.11",
"oc adm must-gather -- /usr/bin/gather_audit_logs",
"NAMESPACE NAME READY STATUS RESTARTS AGE openshift-must-gather-5drcj must-gather-bklx4 2/2 Running 0 72s openshift-must-gather-5drcj must-gather-s8sdh 2/2 Running 0 72s",
"oc adm must-gather --run-namespace <namespace> --image=registry.redhat.io/container-native-virtualization/cnv-must-gather-rhel9:v4.14.11",
"oc adm must-gather --image=registry.redhat.io/openshift-service-mesh/istio-must-gather-rhel8:2.6",
"oc adm must-gather --image=registry.redhat.io/openshift-service-mesh/istio-must-gather-rhel8:2.6 gather <namespace>",
"apiVersion: security.istio.io/v1beta1 kind: AuthorizationPolicy metadata: name: httpbin namespace: foo spec: action: DENY rules: - from: - source: namespaces: [\"dev\"] to: - operation: hosts: [\"httpbin.com\",\"httpbin.com:*\"]",
"apiVersion: security.istio.io/v1beta1 kind: AuthorizationPolicy metadata: name: httpbin namespace: default spec: action: DENY rules: - to: - operation: hosts: [\"httpbin.example.com:*\"]",
"spec: global: pathNormalization: <option>",
"{ \"runtime\": { \"symlink_root\": \"/var/lib/istio/envoy/runtime\" } }",
"oc create secret generic -n <SMCPnamespace> gateway-bootstrap --from-file=bootstrap-override.json",
"apiVersion: maistra.io/v1 kind: ServiceMeshControlPlane spec: istio: gateways: istio-ingressgateway: env: ISTIO_BOOTSTRAP_OVERRIDE: /var/lib/istio/envoy/custom-bootstrap/bootstrap-override.json secretVolumes: - mountPath: /var/lib/istio/envoy/custom-bootstrap name: custom-bootstrap secretName: gateway-bootstrap",
"oc create secret generic -n <SMCPnamespace> gateway-settings --from-literal=overload.global_downstream_max_connections=10000",
"apiVersion: maistra.io/v1 kind: ServiceMeshControlPlane spec: template: default #Change the version to \"v1.0\" if you are on the 1.0 stream. version: v1.1 istio: gateways: istio-ingressgateway: env: ISTIO_BOOTSTRAP_OVERRIDE: /var/lib/istio/envoy/custom-bootstrap/bootstrap-override.json secretVolumes: - mountPath: /var/lib/istio/envoy/custom-bootstrap name: custom-bootstrap secretName: gateway-bootstrap # below is the new secret mount - mountPath: /var/lib/istio/envoy/runtime name: gateway-settings secretName: gateway-settings",
"oc get jaeger -n istio-system",
"NAME AGE jaeger 3d21h",
"oc get jaeger jaeger -oyaml -n istio-system > /tmp/jaeger-cr.yaml",
"oc delete jaeger jaeger -n istio-system",
"oc create -f /tmp/jaeger-cr.yaml -n istio-system",
"rm /tmp/jaeger-cr.yaml",
"oc delete -f <jaeger-cr-file>",
"oc delete -f jaeger-prod-elasticsearch.yaml",
"oc create -f <jaeger-cr-file>",
"oc get pods -n jaeger-system -w",
"spec: version: v1.1",
"apiVersion: \"rbac.istio.io/v1alpha1\" kind: ServiceRoleBinding metadata: name: httpbin-client-binding namespace: httpbin spec: subjects: - user: \"cluster.local/ns/istio-system/sa/istio-ingressgateway-service-account\" properties: request.headers[<header>]: \"value\"",
"apiVersion: \"rbac.istio.io/v1alpha1\" kind: ServiceRoleBinding metadata: name: httpbin-client-binding namespace: httpbin spec: subjects: - user: \"cluster.local/ns/istio-system/sa/istio-ingressgateway-service-account\" properties: request.regex.headers[<header>]: \"<regular expression>\"",
"oc login --username=<NAMEOFUSER> https://<HOSTNAME>:6443",
"oc new-project istio-system",
"oc create -n istio-system -f istio-installation.yaml",
"oc get smcp -n istio-system",
"NAME READY STATUS PROFILES VERSION AGE basic-install 11/11 ComponentsReady [\"default\"] v1.1.18 4m25s",
"oc get pods -n istio-system -w",
"NAME READY STATUS RESTARTS AGE grafana-7bf5764d9d-2b2f6 2/2 Running 0 28h istio-citadel-576b9c5bbd-z84z4 1/1 Running 0 28h istio-egressgateway-5476bc4656-r4zdv 1/1 Running 0 28h istio-galley-7d57b47bb7-lqdxv 1/1 Running 0 28h istio-ingressgateway-dbb8f7f46-ct6n5 1/1 Running 0 28h istio-pilot-546bf69578-ccg5x 2/2 Running 0 28h istio-policy-77fd498655-7pvjw 2/2 Running 0 28h istio-sidecar-injector-df45bd899-ctxdt 1/1 Running 0 28h istio-telemetry-66f697d6d5-cj28l 2/2 Running 0 28h jaeger-896945cbc-7lqrr 2/2 Running 0 11h kiali-78d9c5b87c-snjzh 1/1 Running 0 22h prometheus-6dff867c97-gr2n5 2/2 Running 0 28h",
"oc login --username=<NAMEOFUSER> https://<HOSTNAME>:6443",
"oc new-project <your-project>",
"apiVersion: maistra.io/v1 kind: ServiceMeshMemberRoll metadata: name: default namespace: istio-system spec: members: # a list of projects joined into the service mesh - your-project-name - another-project-name",
"oc create -n istio-system -f servicemeshmemberroll-default.yaml",
"oc get smmr -n istio-system default",
"oc edit smmr -n <controlplane-namespace>",
"apiVersion: maistra.io/v1 kind: ServiceMeshMemberRoll metadata: name: default namespace: istio-system #control plane project spec: members: # a list of projects joined into the service mesh - your-project-name - another-project-name",
"oc patch deployment/<deployment> -p '{\"spec\":{\"template\":{\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/restartedAt\": \"'`date -Iseconds`'\"}}}}}'",
"apiVersion: maistra.io/v1 kind: ServiceMeshControlPlane spec: istio: global: mtls: enabled: true",
"apiVersion: \"authentication.istio.io/v1alpha1\" kind: \"Policy\" metadata: name: default namespace: <NAMESPACE> spec: peers: - mtls: {}",
"apiVersion: \"networking.istio.io/v1alpha3\" kind: \"DestinationRule\" metadata: name: \"default\" namespace: <CONTROL_PLANE_NAMESPACE>> spec: host: \"*.local\" trafficPolicy: tls: mode: ISTIO_MUTUAL",
"apiVersion: maistra.io/v1 kind: ServiceMeshControlPlane spec: istio: global: tls: minProtocolVersion: TLSv1_2 maxProtocolVersion: TLSv1_3",
"oc create secret generic cacerts -n istio-system --from-file=<path>/ca-cert.pem --from-file=<path>/ca-key.pem --from-file=<path>/root-cert.pem --from-file=<path>/cert-chain.pem",
"apiVersion: maistra.io/v1 kind: ServiceMeshControlPlane spec: istio: global: mtls: enabled: true security: selfSigned: false",
"oc delete secret istio.default",
"RATINGSPOD=`oc get pods -l app=ratings -o jsonpath='{.items[0].metadata.name}'`",
"oc exec -it USDRATINGSPOD -c istio-proxy -- /bin/cat /etc/certs/root-cert.pem > /tmp/pod-root-cert.pem",
"oc exec -it USDRATINGSPOD -c istio-proxy -- /bin/cat /etc/certs/cert-chain.pem > /tmp/pod-cert-chain.pem",
"openssl x509 -in <path>/root-cert.pem -text -noout > /tmp/root-cert.crt.txt",
"openssl x509 -in /tmp/pod-root-cert.pem -text -noout > /tmp/pod-root-cert.crt.txt",
"diff /tmp/root-cert.crt.txt /tmp/pod-root-cert.crt.txt",
"sed '0,/^-----END CERTIFICATE-----/d' /tmp/pod-cert-chain.pem > /tmp/pod-cert-chain-ca.pem",
"openssl x509 -in <path>/ca-cert.pem -text -noout > /tmp/ca-cert.crt.txt",
"openssl x509 -in /tmp/pod-cert-chain-ca.pem -text -noout > /tmp/pod-cert-chain-ca.crt.txt",
"diff /tmp/ca-cert.crt.txt /tmp/pod-cert-chain-ca.crt.txt",
"head -n 21 /tmp/pod-cert-chain.pem > /tmp/pod-cert-chain-workload.pem",
"openssl verify -CAfile <(cat <path>/ca-cert.pem <path>/root-cert.pem) /tmp/pod-cert-chain-workload.pem",
"/tmp/pod-cert-chain-workload.pem: OK",
"oc delete secret cacerts -n istio-system",
"apiVersion: maistra.io/v1 kind: ServiceMeshControlPlane spec: istio: global: mtls: enabled: true security: selfSigned: true",
"apiVersion: networking.istio.io/v1alpha3 kind: Gateway metadata: name: ext-host-gwy spec: selector: istio: ingressgateway # use istio default controller servers: - port: number: 443 name: https protocol: HTTPS hosts: - ext-host.example.com tls: mode: SIMPLE serverCertificate: /tmp/tls.crt privateKey: /tmp/tls.key",
"apiVersion: networking.istio.io/v1alpha3 kind: VirtualService metadata: name: virtual-svc spec: hosts: - ext-host.example.com gateways: - ext-host-gwy",
"apiVersion: networking.istio.io/v1alpha3 kind: Gateway metadata: name: bookinfo-gateway spec: selector: istio: ingressgateway servers: - port: number: 80 name: http protocol: HTTP hosts: - \"*\"",
"oc apply -f gateway.yaml",
"apiVersion: networking.istio.io/v1alpha3 kind: VirtualService metadata: name: bookinfo spec: hosts: - \"*\" gateways: - bookinfo-gateway http: - match: - uri: exact: /productpage - uri: prefix: /static - uri: exact: /login - uri: exact: /logout - uri: prefix: /api/v1/products route: - destination: host: productpage port: number: 9080",
"oc apply -f vs.yaml",
"export GATEWAY_URL=USD(oc -n istio-system get route istio-ingressgateway -o jsonpath='{.spec.host}')",
"export TARGET_PORT=USD(oc -n istio-system get route istio-ingressgateway -o jsonpath='{.spec.port.targetPort}')",
"curl -s -I \"USDGATEWAY_URL/productpage\"",
"oc get svc istio-ingressgateway -n istio-system",
"export INGRESS_HOST=USD(oc -n istio-system get service istio-ingressgateway -o jsonpath='{.status.loadBalancer.ingress[0].ip}')",
"export INGRESS_PORT=USD(oc -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name==\"http2\")].port}')",
"export SECURE_INGRESS_PORT=USD(oc -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name==\"https\")].port}')",
"export TCP_INGRESS_PORT=USD(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name==\"tcp\")].port}')",
"export INGRESS_HOST=USD(oc -n istio-system get service istio-ingressgateway -o jsonpath='{.status.loadBalancer.ingress[0].hostname}')",
"export INGRESS_PORT=USD(oc -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name==\"http2\")].nodePort}')",
"export SECURE_INGRESS_PORT=USD(oc -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name==\"https\")].nodePort}')",
"export TCP_INGRESS_PORT=USD(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name==\"tcp\")].nodePort}')",
"spec: istio: gateways: istio-egressgateway: autoscaleEnabled: false autoscaleMin: 1 autoscaleMax: 5 istio-ingressgateway: autoscaleEnabled: false autoscaleMin: 1 autoscaleMax: 5 ior_enabled: true",
"apiVersion: networking.istio.io/v1alpha3 kind: Gateway metadata: name: gateway1 spec: selector: istio: ingressgateway servers: - port: number: 80 name: http protocol: HTTP hosts: - www.bookinfo.com - bookinfo.example.com",
"oc -n <control_plane_namespace> get routes",
"NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD gateway1-lvlfn bookinfo.example.com istio-ingressgateway <all> None gateway1-scqhv www.bookinfo.com istio-ingressgateway <all> None",
"apiVersion: networking.istio.io/v1alpha3 kind: ServiceEntry metadata: name: svc-entry spec: hosts: - ext-svc.example.com ports: - number: 443 name: https protocol: HTTPS location: MESH_EXTERNAL resolution: DNS",
"apiVersion: networking.istio.io/v1alpha3 kind: DestinationRule metadata: name: ext-res-dr spec: host: ext-svc.example.com trafficPolicy: tls: mode: MUTUAL clientCertificate: /etc/certs/myclientcert.pem privateKey: /etc/certs/client_private_key.pem caCertificates: /etc/certs/rootcacerts.pem",
"apiVersion: networking.istio.io/v1alpha3 kind: VirtualService metadata: name: reviews spec: hosts: - reviews http: - match: - headers: end-user: exact: jason route: - destination: host: reviews subset: v2 - route: - destination: host: reviews subset: v3",
"oc apply -f <VirtualService.yaml>",
"spec: hosts:",
"spec: http: - match:",
"spec: http: - match: - destination:",
"apiVersion: networking.istio.io/v1alpha3 kind: DestinationRule metadata: name: my-destination-rule spec: host: my-svc trafficPolicy: loadBalancer: simple: RANDOM subsets: - name: v1 labels: version: v1 - name: v2 labels: version: v2 trafficPolicy: loadBalancer: simple: ROUND_ROBIN - name: v3 labels: version: v3",
"oc apply -f https://raw.githubusercontent.com/Maistra/istio/maistra-2.6/samples/bookinfo/networking/virtual-service-all-v1.yaml",
"oc get virtualservices -o yaml",
"export GATEWAY_URL=USD(oc -n istio-system get route istio-ingressgateway -o jsonpath='{.spec.host}')",
"echo \"http://USDGATEWAY_URL/productpage\"",
"oc apply -f https://raw.githubusercontent.com/Maistra/istio/maistra-2.6/samples/bookinfo/networking/virtual-service-reviews-test-v2.yaml",
"oc get virtualservice reviews -o yaml",
"oc create configmap --from-file=<templates-directory> smcp-templates -n openshift-operators",
"oc get clusterserviceversion -n openshift-operators | grep 'Service Mesh'",
"maistra.v1.0.0 Red Hat OpenShift Service Mesh 1.0.0 Succeeded",
"oc edit clusterserviceversion -n openshift-operators maistra.v1.0.0",
"deployments: - name: istio-operator spec: template: spec: containers: volumeMounts: - name: discovery-cache mountPath: /home/istio-operator/.kube/cache/discovery - name: smcp-templates mountPath: /usr/local/share/istio-operator/templates/ volumes: - name: discovery-cache emptyDir: medium: Memory - name: smcp-templates configMap: name: smcp-templates",
"apiVersion: maistra.io/v1 kind: ServiceMeshControlPlane metadata: name: minimal-install spec: template: default",
"oc get deployment -n <namespace>",
"get deployment -n bookinfo ratings-v1 -o yaml",
"apiVersion: apps/v1 kind: Deployment metadata: name: ratings-v1 namespace: bookinfo labels: app: ratings version: v1 spec: template: metadata: labels: sidecar.istio.io/inject: 'true'",
"oc apply -n <namespace> -f deployment.yaml",
"oc apply -n bookinfo -f deployment-ratings-v1.yaml",
"oc get deployment -n <namespace> <deploymentName> -o yaml",
"oc get deployment -n bookinfo ratings-v1 -o yaml",
"apiVersion: apps/v1 kind: Deployment metadata: name: resource spec: replicas: 7 selector: matchLabels: app: resource template: metadata: annotations: sidecar.maistra.io/proxyEnv: \"{ \\\"maistra_test_env\\\": \\\"env_value\\\", \\\"maistra_test_env_2\\\": \\\"env_value_2\\\" }\"",
"oc get cm -n istio-system istio -o jsonpath='{.data.mesh}' | grep disablePolicyChecks",
"oc edit cm -n istio-system istio",
"oc new-project bookinfo",
"apiVersion: maistra.io/v1 kind: ServiceMeshMemberRoll metadata: name: default spec: members: - bookinfo",
"oc create -n istio-system -f servicemeshmemberroll-default.yaml",
"oc get smmr -n istio-system -o wide",
"NAME READY STATUS AGE MEMBERS default 1/1 Configured 70s [\"bookinfo\"]",
"oc apply -n bookinfo -f https://raw.githubusercontent.com/Maistra/istio/maistra-2.6/samples/bookinfo/platform/kube/bookinfo.yaml",
"service/details created serviceaccount/bookinfo-details created deployment.apps/details-v1 created service/ratings created serviceaccount/bookinfo-ratings created deployment.apps/ratings-v1 created service/reviews created serviceaccount/bookinfo-reviews created deployment.apps/reviews-v1 created deployment.apps/reviews-v2 created deployment.apps/reviews-v3 created service/productpage created serviceaccount/bookinfo-productpage created deployment.apps/productpage-v1 created",
"oc apply -n bookinfo -f https://raw.githubusercontent.com/Maistra/istio/maistra-2.6/samples/bookinfo/networking/bookinfo-gateway.yaml",
"gateway.networking.istio.io/bookinfo-gateway created virtualservice.networking.istio.io/bookinfo created",
"export GATEWAY_URL=USD(oc -n istio-system get route istio-ingressgateway -o jsonpath='{.spec.host}')",
"oc apply -n bookinfo -f https://raw.githubusercontent.com/Maistra/istio/maistra-2.6/samples/bookinfo/networking/destination-rule-all.yaml",
"oc apply -n bookinfo -f https://raw.githubusercontent.com/Maistra/istio/maistra-2.6/samples/bookinfo/networking/destination-rule-all-mtls.yaml",
"destinationrule.networking.istio.io/productpage created destinationrule.networking.istio.io/reviews created destinationrule.networking.istio.io/ratings created destinationrule.networking.istio.io/details created",
"oc get pods -n bookinfo",
"NAME READY STATUS RESTARTS AGE details-v1-55b869668-jh7hb 2/2 Running 0 12m productpage-v1-6fc77ff794-nsl8r 2/2 Running 0 12m ratings-v1-7d7d8d8b56-55scn 2/2 Running 0 12m reviews-v1-868597db96-bdxgq 2/2 Running 0 12m reviews-v2-5b64f47978-cvssp 2/2 Running 0 12m reviews-v3-6dfd49b55b-vcwpf 2/2 Running 0 12m",
"echo \"http://USDGATEWAY_URL/productpage\"",
"oc delete project bookinfo",
"oc -n istio-system patch --type='json' smmr default -p '[{\"op\": \"remove\", \"path\": \"/spec/members\", \"value\":[\"'\"bookinfo\"'\"]}]'",
"curl \"http://USDGATEWAY_URL/productpage\"",
"export JAEGER_URL=USD(oc get route -n istio-system jaeger -o jsonpath='{.spec.host}')",
"echo USDJAEGER_URL",
"curl \"http://USDGATEWAY_URL/productpage\"",
"apiVersion: maistra.io/v1 kind: ServiceMeshControlPlane metadata: name: basic-install spec: istio: global: proxy: resources: requests: cpu: 100m memory: 128Mi limits: cpu: 500m memory: 128Mi gateways: istio-egressgateway: autoscaleEnabled: false istio-ingressgateway: autoscaleEnabled: false ior_enabled: false mixer: policy: autoscaleEnabled: false telemetry: autoscaleEnabled: false resources: requests: cpu: 100m memory: 1G limits: cpu: 500m memory: 4G pilot: autoscaleEnabled: false traceSampling: 100 kiali: enabled: true grafana: enabled: true tracing: enabled: true jaeger: template: all-in-one",
"istio: global: tag: 1.1.0 hub: registry.redhat.io/openshift-service-mesh/ proxy: resources: requests: cpu: 10m memory: 128Mi limits: mtls: enabled: false disablePolicyChecks: true policyCheckFailOpen: false imagePullSecrets: - MyPullSecret",
"gateways: egress: enabled: true runtime: deployment: autoScaling: enabled: true maxReplicas: 5 minReplicas: 1 enabled: true ingress: enabled: true runtime: deployment: autoScaling: enabled: true maxReplicas: 5 minReplicas: 1",
"mixer: enabled: true policy: autoscaleEnabled: false telemetry: autoscaleEnabled: false resources: requests: cpu: 10m memory: 128Mi limits:",
"spec: runtime: components: pilot: deployment: autoScaling: enabled: true minReplicas: 1 maxReplicas: 5 targetCPUUtilizationPercentage: 85 pod: tolerations: - key: node.kubernetes.io/unreachable operator: Exists effect: NoExecute tolerationSeconds: 60 affinity: podAntiAffinity: requiredDuringScheduling: - key: istio topologyKey: kubernetes.io/hostname operator: In values: - pilot container: resources: limits: cpu: 100m memory: 128M",
"apiVersion: maistra.io/v1 kind: ServiceMeshControlPlane spec: kiali: enabled: true dashboard: viewOnlyMode: false ingress: enabled: true",
"enabled",
"dashboard viewOnlyMode",
"ingress enabled",
"spec: kiali: enabled: true dashboard: viewOnlyMode: false grafanaURL: \"https://grafana-istio-system.127.0.0.1.nip.io\" ingress: enabled: true",
"spec: kiali: enabled: true dashboard: viewOnlyMode: false jaegerURL: \"http://jaeger-query-istio-system.127.0.0.1.nip.io\" ingress: enabled: true",
"apiVersion: maistra.io/v1 kind: ServiceMeshControlPlane spec: version: v1.1 istio: tracing: enabled: true jaeger: template: all-in-one",
"tracing: enabled:",
"jaeger: template:",
"apiVersion: maistra.io/v1 kind: ServiceMeshControlPlane spec: istio: tracing: enabled: true ingress: enabled: true jaeger: template: production-elasticsearch elasticsearch: nodeCount: 3 redundancyPolicy: resources: requests: cpu: \"1\" memory: \"16Gi\" limits: cpu: \"1\" memory: \"16Gi\"",
"tracing: enabled:",
"ingress: enabled:",
"jaeger: template:",
"elasticsearch: nodeCount:",
"requests: cpu:",
"requests: memory:",
"limits: cpu:",
"limits: memory:",
"oc get route -n istio-system external-jaeger",
"NAME HOST/PORT PATH SERVICES [...] external-jaeger external-jaeger-istio-system.apps.test external-jaeger-query [...]",
"apiVersion: jaegertracing.io/v1 kind: \"Jaeger\" metadata: name: \"external-jaeger\" # Deploy to the Control Plane Namespace namespace: istio-system spec: # Set Up Authentication ingress: enabled: true security: oauth-proxy openshift: # This limits user access to the Jaeger instance to users who have access # to the control plane namespace. Make sure to set the correct namespace here sar: '{\"namespace\": \"istio-system\", \"resource\": \"pods\", \"verb\": \"get\"}' htpasswdFile: /etc/proxy/htpasswd/auth volumeMounts: - name: secret-htpasswd mountPath: /etc/proxy/htpasswd volumes: - name: secret-htpasswd secret: secretName: htpasswd",
"apiVersion: maistra.io/v1 kind: ServiceMeshControlPlane metadata: name: external-jaeger namespace: istio-system spec: version: v1.1 istio: tracing: # Disable Jaeger deployment by service mesh operator enabled: false global: tracer: zipkin: # Set Endpoint for Trace Collection address: external-jaeger-collector.istio-system.svc.cluster.local:9411 kiali: # Set Jaeger dashboard URL dashboard: jaegerURL: https://external-jaeger-istio-system.apps.test # Set Endpoint for Trace Querying jaegerInClusterURL: external-jaeger-query.istio-system.svc.cluster.local",
"apiVersion: maistra.io/v1 kind: ServiceMeshControlPlane spec: istio: tracing: enabled: true ingress: enabled: true jaeger: template: production-elasticsearch elasticsearch: nodeCount: 3 redundancyPolicy: resources: requests: cpu: \"1\" memory: \"16Gi\" limits: cpu: \"1\" memory: \"16Gi\"",
"tracing: enabled:",
"ingress: enabled:",
"jaeger: template:",
"elasticsearch: nodeCount:",
"requests: cpu:",
"requests: memory:",
"limits: cpu:",
"limits: memory:",
"apiVersion: jaegertracing.io/v1 kind: Jaeger spec: strategy: production storage: type: elasticsearch esIndexCleaner: enabled: false numberOfDays: 7 schedule: \"55 23 * * *\"",
"apiVersion: maistra.io/v2 kind: ServiceMeshControlPlane metadata: name: basic spec: addons: 3Scale: enabled: false PARAM_THREESCALE_LISTEN_ADDR: 3333 PARAM_THREESCALE_LOG_LEVEL: info PARAM_THREESCALE_LOG_JSON: true PARAM_THREESCALE_LOG_GRPC: false PARAM_THREESCALE_REPORT_METRICS: true PARAM_THREESCALE_METRICS_PORT: 8080 PARAM_THREESCALE_CACHE_TTL_SECONDS: 300 PARAM_THREESCALE_CACHE_REFRESH_SECONDS: 180 PARAM_THREESCALE_CACHE_ENTRIES_MAX: 1000 PARAM_THREESCALE_CACHE_REFRESH_RETRIES: 1 PARAM_THREESCALE_ALLOW_INSECURE_CONN: false PARAM_THREESCALE_CLIENT_TIMEOUT_SECONDS: 10 PARAM_THREESCALE_GRPC_CONN_MAX_SECONDS: 60 PARAM_USE_CACHED_BACKEND: false PARAM_BACKEND_CACHE_FLUSH_INTERVAL_SECONDS: 15 PARAM_BACKEND_CACHE_POLICY_FAIL_CLOSED: true",
"apiVersion: \"config.istio.io/v1alpha2\" kind: handler metadata: name: threescale spec: adapter: threescale params: system_url: \"https://<organization>-admin.3scale.net/\" access_token: \"<ACCESS_TOKEN>\" connection: address: \"threescale-istio-adapter:3333\"",
"apiVersion: \"config.istio.io/v1alpha2\" kind: rule metadata: name: threescale spec: match: destination.labels[\"service-mesh.3scale.net\"] == \"true\" actions: - handler: threescale.handler instances: - threescale-authorization.instance",
"3scale-config-gen --name=admin-credentials --url=\"https://<organization>-admin.3scale.net:443\" --token=\"[redacted]\"",
"3scale-config-gen --url=\"https://<organization>-admin.3scale.net\" --name=\"my-unique-id\" --service=\"123456789\" --token=\"[redacted]\"",
"export NS=\"istio-system\" URL=\"https://replaceme-admin.3scale.net:443\" NAME=\"name\" TOKEN=\"token\" exec -n USD{NS} USD(oc get po -n USD{NS} -o jsonpath='{.items[?(@.metadata.labels.app==\"3scale-istio-adapter\")].metadata.name}') -it -- ./3scale-config-gen --url USD{URL} --name USD{NAME} --token USD{TOKEN} -n USD{NS}",
"export CREDENTIALS_NAME=\"replace-me\" export SERVICE_ID=\"replace-me\" export DEPLOYMENT=\"replace-me\" patch=\"USD(oc get deployment \"USD{DEPLOYMENT}\" patch=\"USD(oc get deployment \"USD{DEPLOYMENT}\" --template='{\"spec\":{\"template\":{\"metadata\":{\"labels\":{ {{ range USDk,USDv := .spec.template.metadata.labels }}\"{{ USDk }}\":\"{{ USDv }}\",{{ end }}\"service-mesh.3scale.net/service-id\":\"'\"USD{SERVICE_ID}\"'\",\"service-mesh.3scale.net/credentials\":\"'\"USD{CREDENTIALS_NAME}\"'\"}}}}}' )\" patch deployment \"USD{DEPLOYMENT}\" --patch ''\"USD{patch}\"''",
"apiVersion: \"config.istio.io/v1alpha2\" kind: instance metadata: name: threescale-authorization namespace: istio-system spec: template: authorization params: subject: user: request.query_params[\"user_key\"] | request.headers[\"user-key\"] | \"\" action: path: request.url_path method: request.method | \"get\"",
"apiVersion: \"config.istio.io/v1alpha2\" kind: instance metadata: name: threescale-authorization namespace: istio-system spec: template: authorization params: subject: app_id: request.query_params[\"app_id\"] | request.headers[\"app-id\"] | \"\" app_key: request.query_params[\"app_key\"] | request.headers[\"app-key\"] | \"\" action: path: request.url_path method: request.method | \"get\"",
"apiVersion: \"config.istio.io/v1alpha2\" kind: instance metadata: name: threescale-authorization spec: template: threescale-authorization params: subject: properties: app_key: request.query_params[\"app_key\"] | request.headers[\"app-key\"] | \"\" client_id: request.auth.claims[\"azp\"] | \"\" action: path: request.url_path method: request.method | \"get\" service: destination.labels[\"service-mesh.3scale.net/service-id\"] | \"\"",
"apiVersion: security.istio.io/v1beta1 kind: RequestAuthentication metadata: name: jwt-example namespace: bookinfo spec: selector: matchLabels: app: productpage jwtRules: - issuer: >- http://keycloak-keycloak.34.242.107.254.nip.io/auth/realms/3scale-keycloak jwksUri: >- http://keycloak-keycloak.34.242.107.254.nip.io/auth/realms/3scale-keycloak/protocol/openid-connect/certs",
"apiVersion: \"config.istio.io/v1alpha2\" kind: instance metadata: name: threescale-authorization spec: template: authorization params: subject: user: request.query_params[\"user_key\"] | request.headers[\"user-key\"] | properties: app_id: request.query_params[\"app_id\"] | request.headers[\"app-id\"] | \"\" app_key: request.query_params[\"app_key\"] | request.headers[\"app-key\"] | \"\" client_id: request.auth.claims[\"azp\"] | \"\" action: path: request.url_path method: request.method | \"get\" service: destination.labels[\"service-mesh.3scale.net/service-id\"] | \"\"",
"oc get pods -n istio-system",
"oc logs istio-system",
"oc delete smmr -n istio-system default",
"oc get smcp -n istio-system",
"oc delete smcp -n istio-system <name_of_custom_resource>",
"oc delete validatingwebhookconfiguration/openshift-operators.servicemesh-resources.maistra.io",
"oc delete mutatingwebhookconfiguration/openshift-operators.servicemesh-resources.maistra.io",
"oc delete -n openshift-operators daemonset/istio-node",
"oc delete clusterrole/istio-admin clusterrole/istio-cni clusterrolebinding/istio-cni",
"oc delete clusterrole istio-view istio-edit",
"oc delete clusterrole jaegers.jaegertracing.io-v1-admin jaegers.jaegertracing.io-v1-crdview jaegers.jaegertracing.io-v1-edit jaegers.jaegertracing.io-v1-view",
"oc get crds -o name | grep '.*\\.istio\\.io' | xargs -r -n 1 oc delete",
"oc get crds -o name | grep '.*\\.maistra\\.io' | xargs -r -n 1 oc delete",
"oc get crds -o name | grep '.*\\.kiali\\.io' | xargs -r -n 1 oc delete",
"oc delete crds jaegers.jaegertracing.io",
"oc delete svc admission-controller -n <operator-project>",
"oc delete project <istio-system-project>"
] | https://docs.redhat.com/en/documentation/openshift_container_platform/4.14/html-single/service_mesh/index |
8.4. Configuring Failover Domains | 8.4. Configuring Failover Domains A failover domain is a named subset of cluster nodes that are eligible to run a cluster service in the event of a node failure. A failover domain can have the following characteristics: Unrestricted - Allows you to specify that a subset of members are preferred, but that a cluster service assigned to this domain can run on any available member. Restricted - Allows you to restrict the members that can run a particular cluster service. If none of the members in a restricted failover domain are available, the cluster service cannot be started (either manually or by the cluster software). Unordered - When a cluster service is assigned to an unordered failover domain, the member on which the cluster service runs is chosen from the available failover domain members with no priority ordering. Ordered - Allows you to specify a preference order among the members of a failover domain. Ordered failover domains select the node with the lowest priority number first. That is, the node in a failover domain with a priority number of "1" specifies the highest priority, and therefore is the most preferred node in a failover domain. After that node, the preferred node would be the node with the highest priority number, and so on. Failback - Allows you to specify whether a service in the failover domain should fail back to the node that it was originally running on before that node failed. Configuring this characteristic is useful in circumstances where a node repeatedly fails and is part of an ordered failover domain. In that circumstance, if a node is the preferred node in a failover domain, it is possible for a service to fail over and fail back repeatedly between the preferred node and another node, causing severe impact on performance. Note The failback characteristic is applicable only if ordered failover is configured. Note Changing a failover domain configuration has no effect on currently running services. Note Failover domains are not required for operation. By default, failover domains are unrestricted and unordered. In a cluster with several members, using a restricted failover domain can minimize the work to set up the cluster to run a cluster service (such as httpd ), which requires you to set up the configuration identically on all members that run the cluster service. Instead of setting up the entire cluster to run the cluster service, you can set up only the members in the restricted failover domain that you associate with the cluster service. Note To configure a preferred member, you can create an unrestricted failover domain comprising only one cluster member. Doing that causes a cluster service to run on that cluster member primarily (the preferred member), but allows the cluster service to fail over to any of the other members. To configure a failover domain, use the following procedures: Open /etc/cluster/cluster.conf at any node in the cluster. Add the following skeleton section within the rm element for each failover domain to be used: Note The number of failoverdomainnode attributes depends on the number of nodes in the failover domain. The skeleton failoverdomain section in preceding text shows three failoverdomainnode elements (with no node names specified), signifying that there are three nodes in the failover domain. In the failoverdomain section, provide the values for the elements and attributes. For descriptions of the elements and attributes, see the failoverdomain section of the annotated cluster schema. The annotated cluster schema is available at /usr/share/doc/cman-X.Y.ZZ/cluster_conf.html (for example /usr/share/doc/cman-3.0.12/cluster_conf.html ) in any of the cluster nodes. For an example of a failoverdomains section, see Example 8.8, "A Failover Domain Added to cluster.conf " . Update the config_version attribute by incrementing its value (for example, changing from config_version="2" to config_version="3"> ). Save /etc/cluster/cluster.conf . (Optional) Validate the file against the cluster schema ( cluster.rng ) by running the ccs_config_validate command. For example: Run the cman_tool version -r command to propagate the configuration to the rest of the cluster nodes. Proceed to Section 8.5, "Configuring HA Services" . Example 8.8, "A Failover Domain Added to cluster.conf " shows an example of a configuration with an ordered, unrestricted failover domain. Example 8.8. A Failover Domain Added to cluster.conf The failoverdomains section contains a failoverdomain section for each failover domain in the cluster. This example has one failover domain. In the failoverdomain line, the name ( name ) is specified as example_pri . In addition, it specifies that resources using this domain should fail-back to lower-priority-score nodes when possible ( nofailback="0" ), that failover is ordered ( ordered="1" ), and that the failover domain is unrestricted ( restricted="0" ). Note: The priority value is applicable only if ordered failover is configured. | [
"<failoverdomains> <failoverdomain name=\"\" nofailback=\"\" ordered=\"\" restricted=\"\"> <failoverdomainnode name=\"\" priority=\"\"/> <failoverdomainnode name=\"\" priority=\"\"/> <failoverdomainnode name=\"\" priority=\"\"/> </failoverdomain> </failoverdomains>",
"ccs_config_validate Configuration validates",
"<cluster name=\"mycluster\" config_version=\"3\"> <clusternodes> <clusternode name=\"node-01.example.com\" nodeid=\"1\"> <fence> <method name=\"APC\"> <device name=\"apc\" port=\"1\"/> </method> </fence> </clusternode> <clusternode name=\"node-02.example.com\" nodeid=\"2\"> <fence> <method name=\"APC\"> <device name=\"apc\" port=\"2\"/> </method> </fence> </clusternode> <clusternode name=\"node-03.example.com\" nodeid=\"3\"> <fence> <method name=\"APC\"> <device name=\"apc\" port=\"3\"/> </method> </fence> </clusternode> </clusternodes> <fencedevices> <fencedevice agent=\"fence_apc\" ipaddr=\"apc_ip_example\" login=\"login_example\" name=\"apc\" passwd=\"password_example\"/> </fencedevices> <rm> <failoverdomains> <failoverdomain name=\"example_pri\" nofailback=\"0\" ordered=\"1\" restricted=\"0\"> <failoverdomainnode name=\"node-01.example.com\" priority=\"1\"/> <failoverdomainnode name=\"node-02.example.com\" priority=\"2\"/> <failoverdomainnode name=\"node-03.example.com\" priority=\"3\"/> </failoverdomain> </failoverdomains> </rm> </cluster>"
] | https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/cluster_administration/s1-config-failover-domain-cli-ca |
Chapter 1. Cluster health | Chapter 1. Cluster health 1.1. Verifying OpenShift Data Foundation is healthy Storage health is visible on the Block and File and Object dashboards. Procedure In the OpenShift Web Console, click Storage Data Foundation . In the Status card of the Overview tab, click Storage System and then click the storage system link from the pop up that appears. Check if the Status card has a green tick in the Block and File and the Object tabs. Green tick indicates that the cluster is healthy. See Section 1.2, "Storage health levels and cluster state" for information about the different health states and the alerts that appear. 1.2. Storage health levels and cluster state Status information and alerts related to OpenShift Data Foundation are displayed in the storage dashboards. 1.2.1. Block and File dashboard indicators The Block and File dashboard shows the complete state of OpenShift Data Foundation and the state of persistent volumes. The states that are possible for each resource type are listed in the following table. Table 1.1. OpenShift Data Foundation health levels State Icon Description UNKNOWN OpenShift Data Foundation is not deployed or unavailable. Green Tick Cluster health is good. Warning OpenShift Data Foundation cluster is in a warning state. In internal mode, an alert is displayed along with the issue details. Alerts are not displayed for external mode. Error OpenShift Data Foundation cluster has encountered an error and some component is nonfunctional. In internal mode, an alert is displayed along with the issue details. Alerts are not displayed for external mode. 1.2.2. Object dashboard indicators The Object dashboard shows the state of the Multicloud Object Gateway and any object claims in the cluster. The states that are possible for each resource type are listed in the following table. Table 1.2. Object Service health levels State Description Green Tick Object storage is healthy. Multicloud Object Gateway is not running Shown when NooBaa system is not found. All resources are unhealthy Shown when all NooBaa pools are unhealthy. Many buckets have issues Shown when >= 50% of buckets encounter error(s). Some buckets have issues Shown when >= 30% of buckets encounter error(s). Unavailable Shown when network issues and/or errors exist. 1.2.3. Alert panel The Alert panel appears below the Status card in both the Block and File dashboard and the Object dashboard when the cluster state is not healthy. Information about specific alerts and how to respond to them is available in Troubleshooting OpenShift Data Foundation . | null | https://docs.redhat.com/en/documentation/red_hat_openshift_data_foundation/4.14/html/monitoring_openshift_data_foundation/cluster_health |
Chapter 2. Multicluster storage health | Chapter 2. Multicluster storage health To view the overall storage health status across all the clusters with OpenShift Data Foundation and manage its capacity, you must first enable the multicluster dashboard on the Hub cluster. 2.1. Enabling multicluster dashboard on Hub cluster You can enable the multicluster dashboard on the install screen either before or after installing ODF Multicluster Orchestrator with the console plugin. Prerequisites Ensure that you have installed OpenShift Container Platform version 4.16 and have administrator privileges. Ensure that you have installed Multicluster Orchestrator 4.16 operator with plugin for console enabled. Ensure that you have installed Red Hat Advanced Cluster Management for Kubernetes (RHACM) 2.11 from Operator Hub. For instructions on how to install, see Installing RHACM . Ensure you have enabled observability on RHACM. See Enabling observability guidelines . Procedure Create the configmap file named observability-metrics-custom-allowlist.yaml and add the name of the custom metric to the metrics_list.yaml parameter. You can use the following YAML to list the OpenShift Data Foundation metrics on Hub cluster. For details, see Adding custom metrics . Run the following command in the open-cluster-management-observability namespace: After observability-metrics-custom-allowlist yaml is created, RHACM will start collecting the listed OpenShift Data Foundation metrics from all the managed clusters. If you want to exclude specific managed clusters from collecting the observability data, add the following cluster label to your clusters: observability: disabled . To view the multicluster health, see chapter verifying multicluster storage dashboard . 2.2. Verifying multicluster storage health on hub cluster Prerequisites Ensure that you have enabled multicluster monitoring. For instructions, see chapter Enabling multicluster dashboard . Procedure In the OpenShift web console of Hub cluster, ensure All Clusters is selected. Navigate to Data Services and click Storage System . On the Overview tab, verify that there are green ticks in front of OpenShift Data Foundation and Systems . This indicates that the operator is running and all storage systems are available. In the Status card, Click OpenShift Data Foundation to view the operator status. Click Systems to view the storage system status. The Storage system capacity card shows the following details: Name of the storage system Cluster name Graphical representation of total and used capacity in percentage Actual values for total and used capacity in TiB | [
"kind: ConfigMap apiVersion: v1 metadata: name: observability-metrics-custom-allowlist Namespace: open-cluster-management-observability data: metrics_list.yaml: | names: - odf_system_health_status - odf_system_map - odf_system_raw_capacity_total_bytes - odf_system_raw_capacity_used_bytes matches: - __name__=\"csv_succeeded\",exported_namespace=\"openshift-storage\",name=~\"odf-operator.*\"",
"oc apply -n open-cluster-management-observability -f observability-metrics-custom-allowlist.yaml"
] | https://docs.redhat.com/en/documentation/red_hat_openshift_data_foundation/4.16/html/monitoring_openshift_data_foundation/multicluster_storage_health |
Chapter 5. NFV Performance Considerations | Chapter 5. NFV Performance Considerations For a network functions virtualization (NFV) solution to be useful, its virtualized functions must meet or exceed the performance of physical implementations. Red Hat's virtualization technologies are based on the high-performance Kernel-based Virtual Machine (KVM) hypervisor, common in OpenStack and cloud deployments. 5.1. CPUs and NUMA nodes Previously, all memory on x86 systems was equally accessible to all CPUs in the system. This resulted in memory access times that were the same regardless of which CPU in the system was performing the operation and was referred to as Uniform Memory Access (UMA). In Non-Uniform Memory Access (NUMA), system memory is divided into zones called nodes, which are allocated to particular CPUs or sockets. Access to memory that is local to a CPU is faster than memory connected to remote CPUs on that system. Normally, each socket on a NUMA system has a local memory node whose contents can be accessed faster than the memory in the node local to another CPU or the memory on a bus shared by all CPUs. Similarly, physical NICs are placed in PCI slots on the Compute node hardware. These slots connect to specific CPU sockets that are associated to a particular NUMA node. For optimum performance, connect your datapath NICs to the same NUMA nodes in your CPU configuration (SR-IOV or OVS-DPDK). The performance impact of NUMA misses are significant, generally starting at a 10% performance hit or higher. Each CPU socket can have multiple CPU cores which are treated as individual CPUs for virtualization purposes. Tip For more information about NUMA, see What is NUMA and how does it work on Linux? 5.1.1. NUMA node example The following diagram provides an example of a two-node NUMA system and the way the CPU cores and memory pages are made available: Note Remote memory available via Interconnect is accessed only if VM1 from NUMA node 0 has a CPU core in NUMA node 1. In this case, the memory of NUMA node 1 acts as local for the third CPU core of VM1 (for example, if VM1 is allocated with CPU 4 in the diagram above), but at the same time, it acts as remote memory for the other CPU cores of the same VM. 5.1.2. NUMA aware instances You can configure an OpenStack environment to use NUMA topology awareness on systems with a NUMA architecture. When running a guest operating system in a virtual machine (VM) there are two NUMA topologies involved: the NUMA topology of the physical hardware of the host the NUMA topology of the virtual hardware exposed to the guest operating system You can optimize the performance of guest operating systems by aligning the virtual hardware with the physical hardware NUMA topology. 5.2. CPU pinning CPU pinning is the ability to run a specific virtual machine's virtual CPU on a specific physical CPU, in a given host. vCPU pinning provides similar advantages to task pinning on bare-metal systems. Since virtual machines run as user space tasks on the host operating system, pinning increases cache efficiency. For details on how to configure CPU pinning, see Configuring CPU pinning on the Compute node in the Instances and Images Guide . 5.3. Huge pages Physical memory is segmented into contiguous regions called pages. For efficiency, the system retrieves memory by accessing entire pages instead of individual bytes of memory. To perform this translation, the system looks in the Translation Lookaside Buffers (TLB) that contain the physical to virtual address mappings for the most recently or frequently used pages. When the system cannot find a mapping in the TLB, the processor must iterate through all of the page tables to determine the address mappings. Optimize the TLB to minimize the performance penalty that occurs during these TLB misses. The typical page size in an x86 system is 4KB, with other larger page sizes available. Larger page sizes mean that there are fewer pages overall, and therefore increases the amount of system memory that can have its virtual to physical address translation stored in the TLB. Consequently, this reduces TLB misses, which increases performance. With larger page sizes, there is an increased potential for memory to be under-utilized as processes must allocate in pages, but not all of the memory is likely required. As a result, choosing a page size is a compromise between providing faster access times with larger pages, and ensuring maximum memory utilization with smaller pages. 5.4. Port Security Port security is an anti-spoofing measure that blocks any egress traffic that does not match the source IP and source MAC address of the originating network port. You cannot view or modify this behavior using security group rules. By default, the port_security_enabled parameter is set to enabled on newly created Neutron networks in OpenStack. Newly created ports copy the value of the port_security_enabled parameter from the network they are created on. For some NFV use cases, such as building a firewall or router, you must disable port security. To disable port security on a single port, run the following command: To prevent port security from being enabled on any newly created port on a network, run the following command: | [
"openstack port set --disable-port-security <port-id>",
"openstack network set --disable-port-security <network-id>"
] | https://docs.redhat.com/en/documentation/red_hat_openstack_platform/16.0/html/network_functions_virtualization_product_guide/ch-nfv_tuning_for_performance |
Chapter 85. Build schema reference | Chapter 85. Build schema reference Used in: KafkaConnectSpec Full list of Build schema properties Configures additional connectors for Kafka Connect deployments. 85.1. output To build new container images with additional connector plugins, AMQ Streams requires a container registry where the images can be pushed to, stored, and pulled from. AMQ Streams does not run its own container registry, so a registry must be provided. AMQ Streams supports private container registries as well as public registries such as Quay or Docker Hub . The container registry is configured in the .spec.build.output section of the KafkaConnect custom resource. The output configuration, which is required, supports two types: docker and imagestream . Using Docker registry To use a Docker registry, you have to specify the type as docker , and the image field with the full name of the new container image. The full name must include: The address of the registry Port number (if listening on a non-standard port) The tag of the new container image Example valid container image names: docker.io/my-org/my-image/my-tag quay.io/my-org/my-image/my-tag image-registry.image-registry.svc:5000/myproject/kafka-connect-build:latest Each Kafka Connect deployment must use a separate image, which can mean different tags at the most basic level. If the registry requires authentication, use the pushSecret to set a name of the Secret with the registry credentials. For the Secret, use the kubernetes.io/dockerconfigjson type and a .dockerconfigjson file to contain the Docker credentials. For more information on pulling an image from a private registry, see Create a Secret based on existing Docker credentials . Example output configuration apiVersion: kafka.strimzi.io/v1beta2 kind: KafkaConnect metadata: name: my-connect-cluster spec: #... build: output: type: docker 1 image: my-registry.io/my-org/my-connect-cluster:latest 2 pushSecret: my-registry-credentials 3 #... 1 (Required) Type of output used by AMQ Streams. 2 (Required) Full name of the image used, including the repository and tag. 3 (Optional) Name of the secret with the container registry credentials. Using OpenShift ImageStream Instead of Docker, you can use OpenShift ImageStream to store a new container image. The ImageStream has to be created manually before deploying Kafka Connect. To use ImageStream, set the type to imagestream , and use the image property to specify the name of the ImageStream and the tag used. For example, my-connect-image-stream:latest . Example output configuration apiVersion: kafka.strimzi.io/v1beta2 kind: KafkaConnect metadata: name: my-connect-cluster spec: #... build: output: type: imagestream 1 image: my-connect-build:latest 2 #... 1 (Required) Type of output used by AMQ Streams. 2 (Required) Name of the ImageStream and tag. 85.2. plugins Connector plugins are a set of files that define the implementation required to connect to certain types of external system. The connector plugins required for a container image must be configured using the .spec.build.plugins property of the KafkaConnect custom resource. Each connector plugin must have a name which is unique within the Kafka Connect deployment. Additionally, the plugin artifacts must be listed. These artifacts are downloaded by AMQ Streams, added to the new container image, and used in the Kafka Connect deployment. The connector plugin artifacts can also include additional components, such as (de)serializers. Each connector plugin is downloaded into a separate directory so that the different connectors and their dependencies are properly sandboxed . Each plugin must be configured with at least one artifact . Example plugins configuration with two connector plugins apiVersion: kafka.strimzi.io/v1beta2 kind: KafkaConnect metadata: name: my-connect-cluster spec: #... build: output: #... plugins: 1 - name: debezium-postgres-connector artifacts: - type: tgz url: https://repo1.maven.org/maven2/io/debezium/debezium-connector-postgres/2.3.2.Final/debezium-connector-postgres-2.3.2.Final-plugin.tar.gz sha512sum: 0145fa5138363603c8099cfc8b595f45cd6389a2d2248ecfbdd763849f534efaa5469d75b25395dfca6d87807202ccfbca34aa13a210092e598a97a73a46b6be - name: camel-telegram artifacts: - type: tgz url: https://repo.maven.apache.org/maven2/org/apache/camel/kafkaconnector/camel-telegram-kafka-connector/0.11.5/camel-telegram-kafka-connector-0.11.5-package.tar.gz sha512sum: d6d9f45e0d1dbfcc9f6d1c7ca2046168c764389c78bc4b867dab32d24f710bb74ccf2a007d7d7a8af2dfca09d9a52ccbc2831fc715c195a3634cca055185bd91 #... 1 (Required) List of connector plugins and their artifacts. AMQ Streams supports the following types of artifacts: JAR files, which are downloaded and used directly TGZ archives, which are downloaded and unpacked ZIP archives, which are downloaded and unpacked Maven artifacts, which uses Maven coordinates Other artifacts, which are downloaded and used directly Important AMQ Streams does not perform any security scanning of the downloaded artifacts. For security reasons, you should first verify the artifacts manually, and configure the checksum verification to make sure the same artifact is used in the automated build and in the Kafka Connect deployment. Using JAR artifacts JAR artifacts represent a JAR file that is downloaded and added to a container image. To use a JAR artifacts, set the type property to jar , and specify the download location using the url property. Additionally, you can specify a SHA-512 checksum of the artifact. If specified, AMQ Streams will verify the checksum of the artifact while building the new container image. Example JAR artifact apiVersion: kafka.strimzi.io/v1beta2 kind: KafkaConnect metadata: name: my-connect-cluster spec: #... build: output: #... plugins: - name: my-plugin artifacts: - type: jar 1 url: https://my-domain.tld/my-jar.jar 2 sha512sum: 589...ab4 3 - type: jar url: https://my-domain.tld/my-jar2.jar #... 1 (Required) Type of artifact. 2 (Required) URL from which the artifact is downloaded. 3 (Optional) SHA-512 checksum to verify the artifact. Using TGZ artifacts TGZ artifacts are used to download TAR archives that have been compressed using Gzip compression. The TGZ artifact can contain the whole Kafka Connect connector, even when comprising multiple different files. The TGZ artifact is automatically downloaded and unpacked by AMQ Streams while building the new container image. To use TGZ artifacts, set the type property to tgz , and specify the download location using the url property. Additionally, you can specify a SHA-512 checksum of the artifact. If specified, AMQ Streams will verify the checksum before unpacking it and building the new container image. Example TGZ artifact apiVersion: kafka.strimzi.io/v1beta2 kind: KafkaConnect metadata: name: my-connect-cluster spec: #... build: output: #... plugins: - name: my-plugin artifacts: - type: tgz 1 url: https://my-domain.tld/my-connector-archive.tgz 2 sha512sum: 158...jg10 3 #... 1 (Required) Type of artifact. 2 (Required) URL from which the archive is downloaded. 3 (Optional) SHA-512 checksum to verify the artifact. Using ZIP artifacts ZIP artifacts are used to download ZIP compressed archives. Use ZIP artifacts in the same way as the TGZ artifacts described in the section. The only difference is you specify type: zip instead of type: tgz . Using Maven artifacts maven artifacts are used to specify connector plugin artifacts as Maven coordinates. The Maven coordinates identify plugin artifacts and dependencies so that they can be located and fetched from a Maven repository. Note The Maven repository must be accessible for the connector build process to add the artifacts to the container image. Example Maven artifact apiVersion: kafka.strimzi.io/v1beta2 kind: KafkaConnect metadata: name: my-connect-cluster spec: #... build: output: #... plugins: - name: my-plugin artifacts: - type: maven 1 repository: https://mvnrepository.com 2 group: org.apache.camel.kafkaconnector 3 artifact: camel-kafka-connector 4 version: 0.11.0 5 #... 1 (Required) Type of artifact. 2 (Optional) Maven repository to download the artifacts from. If you do not specify a repository, Maven Central repository is used by default. 3 (Required) Maven group ID. 4 (Required) Maven artifact type. 5 (Required) Maven version number. Using other artifacts other artifacts represent any kind of file that is downloaded and added to a container image. If you want to use a specific name for the artifact in the resulting container image, use the fileName field. If a file name is not specified, the file is named based on the URL hash. Additionally, you can specify a SHA-512 checksum of the artifact. If specified, AMQ Streams will verify the checksum of the artifact while building the new container image. Example other artifact apiVersion: kafka.strimzi.io/v1beta2 kind: KafkaConnect metadata: name: my-connect-cluster spec: #... build: output: #... plugins: - name: my-plugin artifacts: - type: other 1 url: https://my-domain.tld/my-other-file.ext 2 sha512sum: 589...ab4 3 fileName: name-the-file.ext 4 #... 1 (Required) Type of artifact. 2 (Required) URL from which the artifact is downloaded. 3 (Optional) SHA-512 checksum to verify the artifact. 4 (Optional) The name under which the file is stored in the resulting container image. 85.3. Build schema properties Property Description output Configures where should the newly built image be stored. Required. The type depends on the value of the output.type property within the given object, which must be one of [docker, imagestream]. DockerOutput , ImageStreamOutput resources CPU and memory resources to reserve for the build. For more information, see the external documentation for core/v1 resourcerequirements . ResourceRequirements plugins List of connector plugins which should be added to the Kafka Connect. Required. Plugin array | [
"apiVersion: kafka.strimzi.io/v1beta2 kind: KafkaConnect metadata: name: my-connect-cluster spec: # build: output: type: docker 1 image: my-registry.io/my-org/my-connect-cluster:latest 2 pushSecret: my-registry-credentials 3 #",
"apiVersion: kafka.strimzi.io/v1beta2 kind: KafkaConnect metadata: name: my-connect-cluster spec: # build: output: type: imagestream 1 image: my-connect-build:latest 2 #",
"apiVersion: kafka.strimzi.io/v1beta2 kind: KafkaConnect metadata: name: my-connect-cluster spec: # build: output: # plugins: 1 - name: debezium-postgres-connector artifacts: - type: tgz url: https://repo1.maven.org/maven2/io/debezium/debezium-connector-postgres/2.3.2.Final/debezium-connector-postgres-2.3.2.Final-plugin.tar.gz sha512sum: 0145fa5138363603c8099cfc8b595f45cd6389a2d2248ecfbdd763849f534efaa5469d75b25395dfca6d87807202ccfbca34aa13a210092e598a97a73a46b6be - name: camel-telegram artifacts: - type: tgz url: https://repo.maven.apache.org/maven2/org/apache/camel/kafkaconnector/camel-telegram-kafka-connector/0.11.5/camel-telegram-kafka-connector-0.11.5-package.tar.gz sha512sum: d6d9f45e0d1dbfcc9f6d1c7ca2046168c764389c78bc4b867dab32d24f710bb74ccf2a007d7d7a8af2dfca09d9a52ccbc2831fc715c195a3634cca055185bd91 #",
"apiVersion: kafka.strimzi.io/v1beta2 kind: KafkaConnect metadata: name: my-connect-cluster spec: # build: output: # plugins: - name: my-plugin artifacts: - type: jar 1 url: https://my-domain.tld/my-jar.jar 2 sha512sum: 589...ab4 3 - type: jar url: https://my-domain.tld/my-jar2.jar #",
"apiVersion: kafka.strimzi.io/v1beta2 kind: KafkaConnect metadata: name: my-connect-cluster spec: # build: output: # plugins: - name: my-plugin artifacts: - type: tgz 1 url: https://my-domain.tld/my-connector-archive.tgz 2 sha512sum: 158...jg10 3 #",
"apiVersion: kafka.strimzi.io/v1beta2 kind: KafkaConnect metadata: name: my-connect-cluster spec: # build: output: # plugins: - name: my-plugin artifacts: - type: maven 1 repository: https://mvnrepository.com 2 group: org.apache.camel.kafkaconnector 3 artifact: camel-kafka-connector 4 version: 0.11.0 5 #",
"apiVersion: kafka.strimzi.io/v1beta2 kind: KafkaConnect metadata: name: my-connect-cluster spec: # build: output: # plugins: - name: my-plugin artifacts: - type: other 1 url: https://my-domain.tld/my-other-file.ext 2 sha512sum: 589...ab4 3 fileName: name-the-file.ext 4 #"
] | https://docs.redhat.com/en/documentation/red_hat_streams_for_apache_kafka/2.5/html/amq_streams_api_reference/type-build-reference |
function::kernel_pointer | function::kernel_pointer Name function::kernel_pointer - Retrieves a pointer value stored in kernel memory Synopsis Arguments addr The kernel address to retrieve the pointer from Description Returns the pointer value from a given kernel memory address. Reports an error when reading from the given address fails. | [
"kernel_pointer:long(addr:long)"
] | https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/7/html/systemtap_tapset_reference/api-kernel-pointer |
Installation Guide | Installation Guide Red Hat Ceph Storage 7 Installing Red Hat Ceph Storage on Red Hat Enterprise Linux Red Hat Ceph Storage Documentation Team | null | https://docs.redhat.com/en/documentation/red_hat_ceph_storage/7/html/installation_guide/index |
Chapter 11. SecretList [image.openshift.io/v1] | Chapter 11. SecretList [image.openshift.io/v1] Description SecretList is a list of Secret. Type object Required items 11.1. Specification Property Type Description apiVersion string APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources items array (Secret) Items is a list of secret objects. More info: https://kubernetes.io/docs/concepts/configuration/secret kind string Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds metadata ListMeta Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds 11.2. API endpoints The following API endpoints are available: /apis/image.openshift.io/v1/namespaces/{namespace}/imagestreams/{name}/secrets GET : read secrets of the specified ImageStream 11.2.1. /apis/image.openshift.io/v1/namespaces/{namespace}/imagestreams/{name}/secrets Table 11.1. Global path parameters Parameter Type Description name string name of the SecretList namespace string object name and auth scope, such as for teams and projects Table 11.2. Global query parameters Parameter Type Description allowWatchBookmarks boolean allowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. continue string The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the key, but from the latest snapshot, which is inconsistent from the list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the " key". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. fieldSelector string A selector to restrict the list of returned objects by their fields. Defaults to everything. labelSelector string A selector to restrict the list of returned objects by their labels. Defaults to everything. limit integer limit is a maximum number of responses to return for a list call. If more items exist, the server will set the continue field on the list metadata to a value that can be used with the same initial query to retrieve the set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. pretty string If 'true', then the output is pretty printed. resourceVersion string resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset resourceVersionMatch string resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset timeoutSeconds integer Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. watch boolean Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. HTTP method GET Description read secrets of the specified ImageStream Table 11.3. HTTP responses HTTP code Reponse body 200 - OK SecretList schema 401 - Unauthorized Empty | null | https://docs.redhat.com/en/documentation/openshift_container_platform/4.12/html/image_apis/secretlist-image-openshift-io-v1 |
Chapter 1. Using the MicroShift configuration file | Chapter 1. Using the MicroShift configuration file A YAML file customizes MicroShift instances with your preferences, settings, and parameters. Note If you want to make configuration changes or deploy applications through the MicroShift API with tools other than kustomize manifests, you must wait until the greenboot health checks have finished. This ensures that your changes are not lost if greenboot rolls your rpm-ostree system back to an earlier state. 1.1. The MicroShift YAML configuration file At start up, MicroShift checks the system-wide /etc/microshift/ directory for a configuration file named config.yaml . If the configuration file does not exist in the directory, built-in default values are used to start the service. The MicroShift configuration file must be used in combination with host and, sometimes, application and service settings. Ensure that each item is configured in tandem when you customize your MicroShift cluster. 1.1.1. Default settings If you do not create a config.yaml file or use a configuration snippet YAML file, default values are used. The following example shows the default configuration settings. To see the default values, run the following command: USD microshift show-config Default values example output in YAML form apiServer: advertiseAddress: 10.44.0.0/32 1 auditLog: maxFileAge: 0 maxFileSize: 200 maxFiles: 10 profile: Default namedCertificates: - certPath: "" keyPath: "" names: - "" subjectAltNames: [] debugging: logLevel: "Normal" dns: baseDomain: microshift.example.com etcd: memoryLimitMB: 0 ingress: defaultHTTPVersion: 1 forwardedHeaderPolicy: "" httpCompression: mimeTypes: - "" httpEmptyRequestsPolicy: Respond listenAddress: - "" logEmptyRequests: Log ports: http: 80 https: 443 routeAdmissionPolicy: namespaceOwnership: InterNamespaceAllowed status: Managed tuningOptions: clientFinTimeout: "" clientTimeout: "" headerBufferBytes: 0 headerBufferMaxRewriteBytes: 0 healthCheckInterval: "" maxConnections: 0 serverFinTimeout: "" serverTimeout: "" threadCount: 0 tlsInspectDelay: "" tunnelTimeout: "" kubelet: manifests: kustomizePaths: - /usr/lib/microshift/manifests - /usr/lib/microshift/manifests.d/* - /etc/microshift/manifests - /etc/microshift/manifests.d/* network: clusterNetwork: - 10.42.0.0/16 serviceNetwork: - 10.43.0.0/16 serviceNodePortRange: 30000-32767 node: hostnameOverride: "" nodeIP: "" 2 nodeIPv6: "" storage: driver: "" 3 optionalCsiComponents: 4 - "" 1 Calculated based on the address of the service network. 2 The IP address of the default route. 3 Default null value deploys Logical Volume Managed Storage (LVMS). 4 Default null value deploys snapshot-controller . 1.2. Using custom settings To create custom configurations, make a copy of the config.yaml.default file that is provided in the /etc/microshift/ directory, renaming it config.yaml . Keep this file in the /etc/microshift/ directory, and then you can change supported settings that are expected to override the defaults before starting or restarting MicroShift. Important Restart MicroShift after changing any configuration settings to have them take effect. The config.yaml file is read only when MicroShift starts. 1.2.1. Separate restarts Applications and other optional services used with your MicroShift cluster might also need to be restarted separately to apply configuration changes throughout the cluster. For example, when making changes to certain networking settings, you must stop and restart service and application pods to apply those changes. See each procedure for the task you are completing for more information. Tip If you add all of the configurations you need at the same time, you can minimize system restarts. 1.2.2. Parameters and values for the MicroShift config.yaml file The following table explains MicroShift configuration YAML parameters and valid values for each: Table 1.1. MicroShift config.yaml parameters Field Type Description advertiseAddress string A string that specifies the IP address from which the API server is advertised to members of the cluster. The default value is calculated based on the address of the service network. auditLog.maxFileAge number How long log files are kept before automatic deletion. The default value of 0 in the maxFileAge parameter means a log file is never deleted based on age. This value can be configured. auditLog.maxFileSize number By default, when the audit.log file reaches the maxFileSize limit, the audit.log file is rotated and MicroShift begins writing to a new audit.log file. This value can be configured. auditLog.maxFiles number The total number of log files kept. By default, MicroShift retains 10 log files. The oldest is deleted when an excess file is created. This value can be configured. auditLog.profile Default , WriteRequestBodies , AllRequestBodies , or None Logs only metadata for read and write requests; does not log request bodies except for OAuth access token requests. If you do not specify this field, the Default profile is used. namedCertificates list Defines externally generated certificates and domain names by using custom certificate authorities. namedCertificates.certPath path The full path to the certificate. namedCertificates.keyPath path The full path to the certificate key. namedCertificates.names list Optional. Add a list of explicit DNS names. Leading wildcards are allowed. If no names are provided, the implicit names are extracted from the certificates. subjectAltNames Fully qualified domain names (FQDNs), wildcards such as *.domain.com , or IP addresses. Subject Alternative Names for API server certificates. SANs indicate all of the domain names and IP addresses that are secured by a certificate. debugging.logLevel Normal , Debug , Trace , or TraceAll Log verbosity. Default is Normal . dns.baseDomain valid domain Base domain of the cluster. All managed DNS records are subdomains of this base. etcd.memoryLimitMB number By default, etcd uses as much memory as needed to handle the load on the system. However, in memory constrained systems, it might be preferred or necessary to limit the amount of memory etcd can to use at a given time. ingress.defaultHTTPVersion number Determines the default HTTP version to be used for ingress. Default value is 1 , which is the HTTP/1.1 protocol. ingress.forwardedHeaderPolicy Append , Replace , IfNone , Never Specifies when and how the ingress router sets the Forwarded , X-Forwarded-For , X-Forwarded-Host , X-Forwarded-Port , X-Forwarded-Proto , and X-Forwarded-Proto-Version HTTP headers. Append specifies that the ingress router appends existing headers. Append is the default value. Replace specifies that the ingress router sets the headers and replaces any existing Forwarded or X-Forwarded-* headers. IfNone specifies that the ingress router sets headers if they are not already set. Never specifies that ingress router never sets the headers, preserving any existing headers. ingress.httpCompression object httpCompression defines a policy for HTTP traffic compression. There is no HTTP compression by default. ingress.httpCompression.mimeTypes array or null mimeTypes is a list of MIME types to compress. When the list is empty, the ingress controller does not apply any compression. To define a list, use the format of the Content-Type definition in RFC 1341 that specifies the type and subtype of data in the body of a message and the native encoding of the data. For example, Content-Type := type \"/\" subtype *[\";\" parameter] . The value of Content-Type can be one of the following types: application, audio, image, message, multipart, text, video, or a custom type preceded by \"X-\" and followed by a token. The token must be defined in one of the following ways: The token is a string of at least one character, and does not contain white spaces, control characters, or any of the characters in the tspecials set. The tspecials set contains the characters ()\u003c\u003e@,;:\\\"/[]?.= . The subtype in Content-Type is also a token. The optional parameters following the subtype are defined as token \"=\" (token / quoted-string) . The quoted-string , as defined in RFC 822, is surrounded by double quotes and can contain white spaces plus any character except \\ , \" , and CR . The quoted-string can also contain any single ASCII character if it is escaped by the following characters: \\.", . Not all MIME types benefit from compression, but HAProxy uses resources to try to compress files when compression is configured. Generally speaking, text formats such as html , ccs , and js benefit from compression. Spending CPU resources to compress file types that are already compressed, such as images, audio, and video, is probably not worth the limited benefit. ingress.httpEmptyRequestsPolicy Respond or Ignore The default value is Respond . Describes how HTTP connections should be handled if the connection times out before a request is received. These connections typically come from the health probes of a load balancer service health or a web browser's speculative connections, such as a preconnect . If the field is set to Respond , the ingress controller sends an "HTTP 400" or "408" response, logs the connection if access logging is enabled, and counts the connection in the appropriate metrics. If the field is set to Ignore , the ingress controller closes the connection without sending a response, logging the connection, or incrementing metrics. Setting this field to Ignore might impede detection and diagnosis of problems or intrusions, especially when timed-out connections are caused by network errors or port scans. In both cases, logging empty requests can be useful for diagnosing errors and detecting intrusion attempts. ingress.listenAddress IP address, NIC name, or multiple Value defaults to the entire network of the host. The valid configurable value is a list that can be either a single IP address or NIC name or multiple IP addresses and NIC names. ingress.logEmptyRequests Log or Ignore Default value is Log . Specifies how connections on which empty requests are received are logged. These connections typically come from the health probes of a load balancer service health or a web browser's speculative connections, such as a preconnect . Logging typical requests might be undesirable, but requests can also be caused by network errors or port scans, in which case logging can be useful for diagnosing errors and detecting intrusion attempts. ingress.ports.http 80 Default port shown. Configurable. Valid value is a single, unique port in the 1-65535 range. The values of the ports.http and ports.https fields cannot be the same. ingress.ports.https 443 Default port shown. Configurable. Valid value is a single, unique port in the 1-65535 range. The values of the ports.http and ports.https fields cannot be the same. ingress.routeAdmissionPolicy.namespaceOwnership Strict or InterNamespaceAllowed Describes how hostname claims across namespaces are handled. By default, allows routes to claim different paths of the same hostname across namespaces. Specifying Strict prevents routes in different namespaces from claiming the same hostname. If the value is deleted in a customized MicroShift config.yaml , the InterNamespaceAllowed value is automatically set. ingress.status Managed or Removed Router status. Default is Managed . ingress.tuningOptions Objects Specifies options for tuning the performance of ingress controller pods. ingress.tuningOptions.clientFinTimeout string with format duration Defines how long a connection is held open while waiting for a client response to the server/backend before closing the connection. The default timeout is 1s , which is 1 second. ingress.tuningOptions.clientTimeout string with format duration Defines how long a connection is held open while waiting for a client response. The default timeout is 30s , which is 30 seconds. ingress.tuningOptions.headerBufferBytes An integer with the format of int32 ; 16384 is the minimum value when HTTP/2 is enabled. Describes how much memory in bytes must be reserved for IngressController connection sessions. Default value is 32768 in bytes. Setting this field is generally not recommended because headerBufferBytes values that are too small can break the IngressController and headerBufferBytes values that are too large can cause the IngressController to use significantly more memory than necessary. ingress.tuningOptions.headerBufferMaxRewriteBytes integer , formatted int32 ; 4096 is the minimum value Describes how much memory in bytes must be reserved from headerBufferBytes for HTTP header rewriting and appending for IngressController connection sessions. Default value is 8192 bytes. Incoming HTTP requests are limited to the headerBufferBytes bytes minus the headerBufferMaxRewriteBytes bytes, meaning that the value of headerBufferBytes must be greater than the value of headerBufferMaxRewriteBytes . Setting this field is generally not recommended because headerBufferMaxRewriteBytes values that are too small can break the IngressController and headerBufferMaxRewriteBytes values that are too large can cause the IngressController to use significantly more memory than necessary. ingress.tuningOptions.healthCheckInterval: "" string with pattern: ^(0|( (\\.[0-9] )?(ns|us|ms|ms|ms|s|m|h))+)USD The default healthCheckInterval value is 5s , which is 5 seconds. This parameter value defines how long the router waits between two consecutive health checks on the router's configured backends. Currently the minimum allowed value is 1s and the maximum allowed value is 2147483647ms , which is 24.85 days. The range might change in future releases. This value is applied globally as a default for all routes, but can be overridden per-route by the route annotation router.openshift.io/haproxy.health.check.interval . Requires an unsigned duration string of decimal numbers, each with an optional fraction and unit suffix, such as 300ms , 1.5h or 2h45m . Valid time units are ns , us (or ms U+00B5 or ms U+03BC), ms , s , m , h . Setting this parameter value to less than 5s can cause excess traffic due to too frequent TCP health checks and accompanying SYN packet storms. Setting this parameter value too high can result in increased latency because of backend servers that are no longer available, but have not yet been detected as such. An empty or 0 value means "no opinion" and the ingress controller chooses a default. Note that the default value might change in future releases. ingress.tuningOptions.maxConnections integer , valid values are: empty , 0 , -1 , and the range 2000-2000000 Default value is 0 . defines the maximum number of simultaneous connections that can be established per HAProxy process. Increasing this value allows each ingress controller pod to handle more connections at the cost of additional system resources being consumed. If this field is empty or 0 , the IngressController uses the default value of 50000 , but the default is subject to change in future releases. If the value is -1 , then HAProxy dynamically computes a maximum value based on the available resources set with ulimit values in the running container. Selecting -1 , which means auto , results in a large value being computed, and therefore each HAProxy process incurs significant memory usage compared with the current default of 50000 . Setting a value that is greater than the current operating system limit prevents the HAProxy process from starting. You can monitor memory usage for router containers with the following metric: container_memory_working_set_bytes{container=`router`,namespace=`openshift-ingress`}` You can monitor memory usage of individual `HAProxy`processes in router containers with the following metric: container_memory_working_set_bytes{container=`router`,namespace=`openshift-ingress`}/container_processes{container=`router`,namespace=`openshift-ingress`} ingress.tuningOptions.serverFinTimeout string in the format duration Defines how long a connection is held open while waiting for a server or backend response to the client before closing the connection. The default timeout is 1s . ingress.tuningOptions.serverTimeout string in the format duration Defines how long a connection is held open while waiting for a server or backend response. The default timeout is 30s . ingress.tuningOptions.threadCount integer in the form int32 ; minimum value is 1 , maximum is 64 Defines the number of threads created per HAProxy process. The default value is 4 . If this field is empty, the default value is used. Setting this field is generally not recommended. Creating more threads allows each ingress controller pod to handle more connections at the cost of more system resources being used. Increasing the number of HAProxy threads allows the ingress controller pods to use more CPU time under load, potentially starving other pods if set too high. Conversely, reducing the number of threads may cause the ingress controller to perform poorly. ingress.tuningOptions.tlsInspectDelay string in the format duration Defines how long the router can hold data to find a matching route. Setting this interval with too short a value can cause the router to revert to the default certificate for edge-terminated clients or re-encrypt routes, even when a better-matching certificate could be used. The default inspect delay is 5s which is 5 seconds, which is expected to be sufficient for most cases. Increasing the value of this configuration specifically for high-latency networks can cause a delay in finishing the SSL handshake. Any configured value must be transparent to your application. ingress.tuningOptions.tunnelTimeout string in the format duration Defines how long a tunnel connection, including websockets, are held open while the tunnel is idle. The default timeout is 1h , which is 1 hour. kubelet See the MicroShift low-latency instructions Parameter for passthrough configuration of the kubelet node agent. Used for low-latency configuration. Default value is null. manifests list of paths The locations on the file system to scan for kustomization files to use to load manifests. Set to a list of paths to scan only those paths. Set to an empty list to disable loading manifests. The entries in the list can be glob patterns to match multiple subdirectories. Default values are /usr/lib/microshift/manifests , /usr/lib/microshift/manifests.d/ , /etc/microshift/manifests , and /etc/microshift/manifests.d/ . network.clusterNetwork IP address block A block of IP addresses from which pod IP addresses are allocated. IPv4 is the default network. Dual-stack entries are supported. The first entry in this field is immutable after MicroShift starts. Default range is 10.42.0.0/16 . network.serviceNetwork IP address block A block of virtual IP addresses for Kubernetes services. IP address pool for services. IPv4 is the default. Dual-stack entries are supported. The first entry in this field is immutable after MicroShift starts. Default range is 10.43.0.0/16 . network.serviceNodePortRange range The port range allowed for Kubernetes services of type NodePort . If not specified, the default range of 30000-32767 is used. Services without a NodePort specified are automatically allocated one from this range. This parameter can be updated after MicroShift starts. node.hostnameOverride string The name of the node. The default value is the hostname. If non-empty, this string is used to identify the node instead of the hostname. This value is immutable after MicroShift starts. node.nodeIP IPv4 address The IPv4 address of the node. The default value is the IP address of the default route. nodeIPv6 IPv6 address The IPv6 address for the node for dual-stack configurations. Cannot be configured in single stack for either IPv4 or IPv6. Default is an empty value or null. storage.driver none or lvms Default value is empty. An empty value or null field defaults to LVMS deployment. storage.optionalCsiComponents array Default value is null or an empty array. A null or empty array defaults to deploying snapshot-controller . Expected values are csi-snapshot-controller or none . A value of none is mutually exclusive with all other values. 1.2.3. Using configuration snippets If you want to configure one or two settings, such as adding subject alternative names (SANs), you can use the /etc/microshift/config.d/ configuration directory to drop in configuration snippet YAML files. You must restart MicroShift for new configurations to apply. To return to values, you can delete a configuration snippet and restart MicroShift. 1.2.3.1. How configuration snippets work At runtime, the YAML files inside /etc/microshift/config.d are merged into the existing MicroShift configuration, whether that configuration is a result of default values or a user-created config.yaml file. You do not need to create a config.yaml file to use a configuration snippet. Files in the snippet directory are sorted in lexicographical order and run sequentially. You can use numerical prefixes for snippets so that each is read in the order you want. The last-read file takes precedence when there is more than one YAML for the same parameter. Important Configuration snippets take precedence over both default values and a customized config.yaml configuration file. 1.2.3.2. Example list configuration snippets Lists, or arrays, are not merged, they are overwritten. For example, you can replace a SAN or list of SANs by creating an additional snippet for the same field that is read after the first: MicroShift configuration directory contents /etc/microshift/config.yaml.default or /etc/microshift/config.yaml Example MicroShift configuration snippet directory contents /etc/microshift/config.d/10-san.yaml /etc/microshift/config.d/20-san.yaml Example 10-san.yaml snippet apiServer: subjectAltNames: - host1 - host2 Example 20-san.yaml snippet apiServer: subjectAltNames: - hostZ Example configuration result apiServer: subjectAltNames: - hostZ If you want to add a value to an existing list, you can add it to an existing snippet. For example, to add hostZ to an existing list of SANs, edit the snippet you have instead of creating a new one: Example 10-san.yaml snippet apiServer: subjectAltNames: - host1 - host2 - hostZ Example configuration result apiServer: subjectAltNames: - host1 - host2 - hostZ 1.2.3.3. Example object configuration snippets Objects are merged together. Example 10-advertiseAddress.yaml snippet apiServer: advertiseAddress: "microshift-example" Example 20-audit-log.yaml snippet apiServer: auditLog: maxFileAge: 12 Example configuration result apiServer: advertiseAddress: "microshift-example" auditLog: maxFileAge: 12 1.2.3.4. Example mixed configuration snippets In this example, the values of both advertiseAddress and auditLog.maxFileAge fields are merged into the configuration, but only the c.com and d.com subjectAltNames values are retained because the numbering in the filename indicates that they are higher priority. Example 10-advertiseAddress.yaml snippet apiServer: advertiseAddress: "microshift-example" Example 20-audit-log.yaml snippet apiServer: auditLog: maxFileAge: 12 Example 30-SAN.yaml snippet apiServer: subjectAltNames: - a.com - b.com Example 40-SAN.yaml snippet apiServer: subjectAltNames: - c.com - d.com Example configuration result apiServer: advertiseAddress: "microshift-example" auditLog: maxFileAge: 12 subjectAltNames: - c.com - d.com 1.2.4. Configuring the advertise address network flag The apiserver.advertiseAddress flag specifies the IP address on which to advertise the API server to members of the cluster. This address must be reachable by the cluster. You can set a custom IP address here, but you must also add the IP address to a host interface. Customizing this parameter preempts MicroShift from adding a default IP address to the br-ex network interface. Important If you customize the advertiseAddress IP address, make sure it is reachable by the cluster when MicroShift starts by adding the IP address to a host interface. If unset, the default value is set to the immediate subnet after the service network. For example, when the service network is 10.43.0.0/16 , the advertiseAddress is set to 10.44.0.0/32 . 1.2.5. Extending the port range for NodePort services The serviceNodePortRange setting extends the port range available to NodePort services. This option is useful when specific standard ports under the 30000-32767 range need to be exposed. For example, if your device needs to expose the 1883/tcp MQ Telemetry Transport (MQTT) port on the network because client devices cannot use a different port. Important NodePorts can overlap with system ports, causing a malfunction of the system or MicroShift. Consider the following when configuring the NodePort service ranges: Do not create any NodePort service without an explicit nodePort selection. When an explicit nodePort is not specified, the port is assigned randomly by the kube-apiserver and cannot be predicted. Do not create any NodePort service for any system service port, MicroShift port, or other services you expose on your device HostNetwork . Table one specifies ports to avoid when extending the port range: Table 1.2. Ports to avoid. Port Description 22/tcp SSH port 80/tcp OpenShift Router HTTP endpoint 443/tcp OpenShift Router HTTPS endpoint 1936/tcp Metrics service for the openshift-router, not exposed today 2379/tcp etcd port 2380/tcp etcd port 6443 kubernetes API 8445/tcp openshift-route-controller-manager 9537/tcp cri-o metrics 10250/tcp kubelet 10248/tcp kubelet healthz port 10259/tcp kube scheduler 1.3. Additional resources Checking Greenboot status | [
"microshift show-config",
"apiServer: advertiseAddress: 10.44.0.0/32 1 auditLog: maxFileAge: 0 maxFileSize: 200 maxFiles: 10 profile: Default namedCertificates: - certPath: \"\" keyPath: \"\" names: - \"\" subjectAltNames: [] debugging: logLevel: \"Normal\" dns: baseDomain: microshift.example.com etcd: memoryLimitMB: 0 ingress: defaultHTTPVersion: 1 forwardedHeaderPolicy: \"\" httpCompression: mimeTypes: - \"\" httpEmptyRequestsPolicy: Respond listenAddress: - \"\" logEmptyRequests: Log ports: http: 80 https: 443 routeAdmissionPolicy: namespaceOwnership: InterNamespaceAllowed status: Managed tuningOptions: clientFinTimeout: \"\" clientTimeout: \"\" headerBufferBytes: 0 headerBufferMaxRewriteBytes: 0 healthCheckInterval: \"\" maxConnections: 0 serverFinTimeout: \"\" serverTimeout: \"\" threadCount: 0 tlsInspectDelay: \"\" tunnelTimeout: \"\" kubelet: manifests: kustomizePaths: - /usr/lib/microshift/manifests - /usr/lib/microshift/manifests.d/* - /etc/microshift/manifests - /etc/microshift/manifests.d/* network: clusterNetwork: - 10.42.0.0/16 serviceNetwork: - 10.43.0.0/16 serviceNodePortRange: 30000-32767 node: hostnameOverride: \"\" nodeIP: \"\" 2 nodeIPv6: \"\" storage: driver: \"\" 3 optionalCsiComponents: 4 - \"\"",
"container_memory_working_set_bytes{container=`router`,namespace=`openshift-ingress`}`",
"container_memory_working_set_bytes{container=`router`,namespace=`openshift-ingress`}/container_processes{container=`router`,namespace=`openshift-ingress`}",
"apiServer: subjectAltNames: - host1 - host2",
"apiServer: subjectAltNames: - hostZ",
"apiServer: subjectAltNames: - hostZ",
"apiServer: subjectAltNames: - host1 - host2 - hostZ",
"apiServer: subjectAltNames: - host1 - host2 - hostZ",
"apiServer: advertiseAddress: \"microshift-example\"",
"apiServer: auditLog: maxFileAge: 12",
"apiServer: advertiseAddress: \"microshift-example\" auditLog: maxFileAge: 12",
"apiServer: advertiseAddress: \"microshift-example\"",
"apiServer: auditLog: maxFileAge: 12",
"apiServer: subjectAltNames: - a.com - b.com",
"apiServer: subjectAltNames: - c.com - d.com",
"apiServer: advertiseAddress: \"microshift-example\" auditLog: maxFileAge: 12 subjectAltNames: - c.com - d.com"
] | https://docs.redhat.com/en/documentation/red_hat_build_of_microshift/4.18/html/configuring/using-the-microshift-configuration-file |
Providing feedback on Red Hat build of OpenJDK documentation | Providing feedback on Red Hat build of OpenJDK documentation To report an error or to improve our documentation, log in to your Red Hat Jira account and submit an issue. If you do not have a Red Hat Jira account, then you will be prompted to create an account. Procedure Click the following link to create a ticket . Enter a brief description of the issue in the Summary . Provide a detailed description of the issue or enhancement in the Description . Include a URL to where the issue occurs in the documentation. Clicking Submit creates and routes the issue to the appropriate documentation team. | null | https://docs.redhat.com/en/documentation/red_hat_build_of_openjdk/17/html/release_notes_for_red_hat_build_of_openjdk_17.0.4/proc-providing-feedback-on-redhat-documentation |
Chapter 5. Viewing and exporting logs | Chapter 5. Viewing and exporting logs Activity logs are gathered for all repositories and namespaces (users and organizations) in Red Hat Quay. There are multiple ways of accessing log files, including: Viewing logs through the web UI Exporting logs so they can be saved externally. Accessing log entries via the API To access logs, you must have Admin privilege to the selected repository or namespace. Note A maximum of 100 log results are available at a time via the API. To gather more results that that, you must use the log exporter feature described in this chapter. 5.1. Viewing logs To view log entries for a repository or namespace from the web UI, do the following: Select a repository or namespace (organization or user) for which you have Admin privileges. Select the Usage Logs icon from the left column. A Usage Logs screen appears, like the one shown in the following figure: From the Usage Logs page, you can: Set the date range for viewing log entries by adding dates to the From and to boxes. By default, the most recent one week of log entries is displayed. Type a string into the Filter Logs box to display log entries that container the given string. Toggle the arrow to the left of any log entry to see more or less text associated with that log entry. 5.2. Exporting repository logs To be able to grab a larger number of log files and save them outside of the Red Hat Quay database, you can use the Export Logs feature. Here are a few things you should know about using Export Logs: You can choose a range of dates for the logs you want to gather from a repository. You can request that the logs be sent to you via an email attachment or directed to a callback URL. You need Admin privilege to the repository or namespace to export logs A maximum of 30 days of log data can be exported at a time Export Logs only gathers log data that was previously produced. It does not stream logging data. Your Red Hat Quay instance must be configured for external storage for this feature (local storage will not work). Once the logs are gathered and available, you should immediately copy that data if you want to save it. By default, the data expires in an hour. To use the Export Logs feature: Select a repository for which you have Admin privileges. Select the Usage Logs icon from the left column. A Usage Logs screen appears. Choose the From and to date range of the log entries you want to gather. Select the Export Logs button. An Export Usage Logs pop-up appears, as shown Enter the email address or callback URL you want to receive the exported logs. For the callback URL, you could use a URL to a place such as webhook.site. Select Start Logs Export. This causes Red Hat Quay to begin gathering the selected log entries. Depending on the amount of logging data being gathered, this can take anywhere from one minute to an hour to complete. When the log export is completed you will either: Receive an email, alerting you to the availability of your requested exported log entries. See a successful status of your log export request from the webhook URL. A link to the exported data will be available for you to select to download the logs. Keep in mind that the URL points to a location in your Red Hat Quay external storage and is set to expire within an hour. So make sure you copy the exported logs before that expiration time if you intend to keep them. | null | https://docs.redhat.com/en/documentation/red_hat_quay/3.9/html/use_red_hat_quay/use-quay-view-export-logs |
Chapter 10. Working with pods | Chapter 10. Working with pods Containers are the smallest unit that you can manage with Podman, Skopeo and Buildah container tools. A Podman pod is a group of one or more containers. The Pod concept was introduced by Kubernetes. Podman pods are similar to the Kubernetes definition. Pods are the smallest compute units that you can create, deploy, and manage in OpenShift or Kubernetes environments. Every Podman pod includes an infra container. This container holds the namespaces associated with the pod and allows Podman to connect other containers to the pod. It allows you to start and stop containers within the pod and the pod will stay running. The default infra container on the registry.access.redhat.com/ubi9/pause image. 10.1. Creating pods You can create a pod with one container. Prerequisites The container-tools meta-package is installed. Procedure Create an empty pod: The pod is in the initial state Created. Optional: List all pods: Notice that the pod has one container in it. Optional: List all pods and containers associated with them: You can see that the pod ID from podman ps command matches the pod ID in the podman pod ps command. The default infra container is based on the registry.access.redhat.com/ubi9/pause image. Run a container named myubi in the existing pod named mypod : Optional: List all pods: You can see that the pod has two containers in it. Optional: List all pods and containers associated with them: Additional resources podman-pod-create man page on your system Podman: Managing pods and containers in a local container runtime 10.2. Displaying pod information Learn about how to display pod information. Prerequisites The container-tools meta-package is installed. The pod has been created. For details, see section Creating pods . Procedure Display active processes running in a pod: To display the running processes of containers in a pod, enter: To display a live stream of resource usage stats for containers in one or more pods, enter: To display information describing the pod, enter: You can see information about containers in the pod. Additional resources podman pod top , podman-pod-stats , and podman-pod-inspect man pages on your system 10.3. Stopping pods You can stop one or more pods using the podman pod stop command. Prerequisites The container-tools meta-package is installed. The pod has been created. For details, see section Creating pods . Procedure Stop the pod mypod : Optional: List all pods and containers associated with them: You can see that the pod mypod and container myubi are in "Exited" status. Additional resources podman-pod-stop man page on your system 10.4. Removing pods You can remove one or more stopped pods and containers using the podman pod rm command. Prerequisites The container-tools meta-package is installed. The pod has been created. For details, see section Creating pods . The pod has been stopped. For details, see section Stopping pods . Procedure Remove the pod mypod , type: Note that removing the pod automatically removes all containers inside it. Optional: Check that all containers and pods were removed: Additional resources podman-pod-rm man page on your system | [
"podman pod create --name mypod 223df6b390b4ea87a090a4b5207f7b9b003187a6960bd37631ae9bc12c433aff The pod is in the initial state Created.",
"podman pod ps POD ID NAME STATUS CREATED # OF CONTAINERS INFRA ID 223df6b390b4 mypod Created Less than a second ago 1 3afdcd93de3e",
"podman ps -a --pod CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES POD 3afdcd93de3e registry.access.redhat.com/ubi9/pause Less than a second ago Created 223df6b390b4-infra 223df6b390b4",
"podman run -dt --name myubi --pod mypod registry.access.redhat.com/ubi9/ubi /bin/bash 5df5c48fea87860cf75822ceab8370548b04c78be9fc156570949013863ccf71",
"podman pod ps POD ID NAME STATUS CREATED # OF CONTAINERS INFRA ID 223df6b390b4 mypod Running Less than a second ago 2 3afdcd93de3e",
"podman ps -a --pod CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES POD 5df5c48fea87 registry.access.redhat.com/ubi9/ubi:latest /bin/bash Less than a second ago Up Less than a second ago myubi 223df6b390b4 3afdcd93de3e registry.access.redhat.com/ubi9/pause Less than a second ago Up Less than a second ago 223df6b390b4-infra 223df6b390b4",
"podman pod top mypod USER PID PPID %CPU ELAPSED TTY TIME COMMAND 0 1 0 0.000 24.077433518s ? 0s /pause root 1 0 0.000 24.078146025s pts/0 0s /bin/bash",
"podman pod stats -a --no-stream ID NAME CPU % MEM USAGE / LIMIT MEM % NET IO BLOCK IO PIDS a9f807ffaacd frosty_hodgkin -- 3.092MB / 16.7GB 0.02% -- / -- -- / -- 2 3b33001239ee sleepy_stallman -- -- / -- -- -- / -- -- / -- --",
"podman pod inspect mypod { \"Id\": \"db99446fa9c6d10b973d1ce55a42a6850357e0cd447d9bac5627bb2516b5b19a\", \"Name\": \"mypod\", \"Created\": \"2020-09-08T10:35:07.536541534+02:00\", \"CreateCommand\": [ \"podman\", \"pod\", \"create\", \"--name\", \"mypod\" ], \"State\": \"Running\", \"Hostname\": \"mypod\", \"CreateCgroup\": false, \"CgroupParent\": \"/libpod_parent\", \"CgroupPath\": \"/libpod_parent/db99446fa9c6d10b973d1ce55a42a6850357e0cd447d9bac5627bb2516b5b19a\", \"CreateInfra\": false, \"InfraContainerID\": \"891c54f70783dcad596d888040700d93f3ead01921894bc19c10b0a03c738ff7\", \"SharedNamespaces\": [ \"uts\", \"ipc\", \"net\" ], \"NumContainers\": 2, \"Containers\": [ { \"Id\": \"891c54f70783dcad596d888040700d93f3ead01921894bc19c10b0a03c738ff7\", \"Name\": \"db99446fa9c6-infra\", \"State\": \"running\" }, { \"Id\": \"effc5bbcfe505b522e3bf8fbb5705a39f94a455a66fd81e542bcc27d39727d2d\", \"Name\": \"myubi\", \"State\": \"running\" } ] }",
"podman pod stop mypod",
"podman ps -a --pod CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES POD ID PODNAME 5df5c48fea87 registry.redhat.io/ubi9/ubi:latest /bin/bash About a minute ago Exited (0) 7 seconds ago myubi 223df6b390b4 mypod 3afdcd93de3e registry.access.redhat.com/9/pause About a minute ago Exited (0) 7 seconds ago 8a4e6527ac9d-infra 223df6b390b4 mypod",
"podman pod rm mypod 223df6b390b4ea87a090a4b5207f7b9b003187a6960bd37631ae9bc12c433aff",
"podman ps podman pod ps"
] | https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/9/html/building_running_and_managing_containers/assembly_working-with-pods_building-running-and-managing-containers |
Chapter 5. Setting up to Measure Performance of Applications | Chapter 5. Setting up to Measure Performance of Applications Red Hat Enterprise Linux includes several applications that can help a developer identify the causes of application performance loss. Select the Debugging Tools , Development Tools , and Performance Tools Add-ons during the system installation to install the tools OProfile , perf , and pcp . Install the tools SystemTap , which allows some types of performance analysis, and Valgrind , which includes modules for performance measurement. NOTE: Red Hat Developer Toolset is shipped as a Software Collection. The scl utility allows you to use it, running commands with the Red Hat Developer Toolset binaries used in preference to the Red Hat Enterprise Linux system equivalent. Run a SystemTap helper script for setting up the SystemTap environment. Note Running this script installs very large kernel debuginfo packages. For more frequently updated versions of SystemTap , OProfile , and Valgrind , install the Red Hat Developer Toolset package perftools . Additional Resources Red Hat Developer Toolset User Guide - Part IV., Performance Monitoring Tools | [
"yum install valgrind systemtap systemtap-runtime",
"stap-prep",
"yum install devtoolset-9-perftools"
] | https://docs.redhat.com/en/documentation/Red_Hat_Enterprise_Linux/7/html/developer_guide/setting-up_setup-measuring-performance |
Chapter 40. Red Hat Enterprise Linux Atomic Host 7.2.4 | Chapter 40. Red Hat Enterprise Linux Atomic Host 7.2.4 40.1. Atomic Host OStree update : New Tree Version: 7.2.4 (hash: b060975ce3d5abbf564ca720f64a909d1a4d332aae39cb4de581611526695a0c) Changes since Tree Version 7.2.3-1 (hash: 644fcc603549e996f051b817ba75a746f23f392cfcc7e05ce00342dec6084ea8) Updated packages : cockpit-ostree-0.103-1.el7 New packages : atomic-devmode-0.3.3-3.el7 (Technology Preview) * 40.2. Extras Updated packages : cockpit-0.103-1.el7 docker-1.9.1-40.el7 docker-distribution-2.4.0-2.el7 * kubernetes-1.2.0-0.11.git738b760.el7 runc-0.1.0-3.el7 (Technology Preview) * New packages : docker-latest-1.10.3-22.el7 The asterisk (*) marks packages which are available for Red Hat Enterprise Linux only. 40.2.1. Container Images Updated : Red Hat Enterprise Linux Container Image (rhel7/rhel) Red Hat Enterprise Linux Atomic Tools Container Image (rhel7/rhel-tools) Red Hat Enterprise Linux Atomic rsyslog Container Image (rhel7/rsyslog) Red Hat Enterprise Linux Atomic sadc Container Image (rhel7/sadc) Red Hat Enterprise Linux Atomic cockpit-ws Container Image (rhel7/cockpit-ws) Red Hat Enterprise Linux Atomic etcd Container Image (rhel7/etcd) Red Hat Enterprise Linux Atomic Kubernetes-controller Container Image (rhel7/kubernetes-controller-mgr) Red Hat Enterprise Linux Atomic Kubernetes-apiserver Container Image (rhel7/kubernetes-apiserver) Red Hat Enterprise Linux Atomic Kubernetes-scheduler Container Image (rhel7/kubernetes-scheduler) Red Hat Enterprise Linux Atomic SSSD Container Image (rhel7/sssd) (Technology Preview) 40.3. New Features Beginning with the Atomic Host 7.2.4 release, two versions of the docker service will be included in the operating system: docker 1.9 and docker 1.10. The following Knowledgebase article contains all information you need to know about using these two versions of docker: https://access.redhat.com/articles/2317361 . Introduced conflict between docker 1.9 and atomic-openshift 3.1 / origin 1.1 has been removed Previously, due to stability issues between docker 1.9 and atomic-openshift 3.1 / origin 1.1, docker 1.9 has been packaged to conflict with atomic-openshift versions older than 3.2 and origin versions older than 1.2. As a consequence, running yum update on an OpenShift Enterprise 3.1 system failed due to that introduced conflict. This bug has been fixed, and running yum update now does not cause conflicts, successfully solves the dependencies and installs docker 1.9. Updated kubernetes packages Kubernetes updated to ose v3.2.0.16 corresponding to Kubernetes v1.2.0. Additionally, support for exposing secret keys in environment variables introduced. Cockpit has been rebased to version 0.103 Most notable changes: When Cockpit fails to connect to a host, relevant SSH command or host details are now displayed to help resolve the issue. Docker restart policy can now be configured when starting a new container. Creating logical volumes has been combined into a single dialog. Joining IPA domains no longer offers a Computer OU option. Binary journal data is now displayed correctly. Disk or file system sizes are displayed using IEC names, such as MiB. Logical volumes can no longer be shrunk and the file system partition dialog prevents negative sizes. Strict Content-Security-Policy is implemented on most of Cockpit to prevent a number of browser based attacks. The packages also include numerous other bug fixes and admin interface improvements. | null | https://docs.redhat.com/en/documentation/red_hat_enterprise_linux_atomic_host/7/html/release_notes/red_hat_enterprise_linux_atomic_host_7_2_4 |
5.207. NetworkManager-openswan | 5.207. NetworkManager-openswan 5.207.1. RHBA-2012:0915 - NetworkManager-openswan bug fix update An updated NetworkManager-openswan package that fixes various bugs is now available for Red Hat Enterprise Linux 6. NetworkManager-openswan contains software for integrating the Openswan VPN software with NetworkManager and the GNOME desktop. Bug Fixes BZ# 696946 Prior to this update, it was possible to enter an incorrect IP address into the gateway field and it was sometimes interpreted as a gateway hostname. With this update, the code has been improved to validate IP addresses. As a result, valid IPv4 addresses, as well as hostnames, are more reliably distinguished. BZ# 748365 NetworkManager-openswan was not able to import configuration files that were previously exported using NetworkManager. This release adds support for this functionality and importing Openswan IPsec configuration files is now possible using NetworkManager. All users of NetworkManager-openswan are advised to upgrade to this updated package, which fixes these bugs. | null | https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/6.3_technical_notes/networkmanager-openswan |
Chapter 43. JacksonXML | Chapter 43. JacksonXML Jackson XML is a Data Format which uses the Jackson library with the XMLMapper extension to unmarshal an XML payload into Java objects or to marshal Java objects into an XML payload. NOTE: If you are familiar with Jackson, this XML data format behaves in the same way as its JSON counterpart, and thus can be used with classes annotated for JSON serialization/deserialization. This extension also mimics JAXB's "Code first" approach . This data format relies on Woodstox (especially for features like pretty printing), a fast and efficient XML processor. from("activemq:My.Queue"). unmarshal().jacksonxml(). to("mqseries:Another.Queue"); 43.1. Dependencies When using jacksonxml with Red Hat build of Camel Spring Boot make sure to use the following Maven dependency to have support for auto configuration: <dependency> <groupId>org.apache.camel.springboot</groupId> <artifactId>camel-jacksonxml-starter</artifactId> </dependency> 43.2. JacksonXML Options The JacksonXML dataformat supports 15 options, which are listed below. Name Default Java Type Description xmlMapper String Lookup and use the existing XmlMapper with the given id. prettyPrint false Boolean To enable pretty printing output nicely formatted. Is by default false. unmarshalType String Class name of the java type to use when unmarshalling. jsonView String When marshalling a POJO to JSON you might want to exclude certain fields from the JSON output. With Jackson you can use JSON views to accomplish this. This option is to refer to the class which has JsonView annotations. include String If you want to marshal a pojo to JSON, and the pojo has some fields with null values. And you want to skip these null values, you can set this option to NON_NULL. allowJmsType Boolean Used for JMS users to allow the JMSType header from the JMS spec to specify a FQN classname to use to unmarshal to. collectionType String Refers to a custom collection type to lookup in the registry to use. This option should rarely be used, but allows to use different collection types than java.util.Collection based as default. useList Boolean To unmarshal to a List of Map or a List of Pojo. enableJaxbAnnotationModule Boolean Whether to enable the JAXB annotations module when using jackson. When enabled then JAXB annotations can be used by Jackson. moduleClassNames String To use custom Jackson modules com.fasterxml.jackson.databind.Module specified as a String with FQN class names. Multiple classes can be separated by comma. moduleRefs String To use custom Jackson modules referred from the Camel registry. Multiple modules can be separated by comma. enableFeatures String Set of features to enable on the Jackson com.fasterxml.jackson.databind.ObjectMapper. The features should be a name that matches a enum from com.fasterxml.jackson.databind.SerializationFeature, com.fasterxml.jackson.databind.DeserializationFeature, or com.fasterxml.jackson.databind.MapperFeature Multiple features can be separated by comma. disableFeatures String Set of features to disable on the Jackson com.fasterxml.jackson.databind.ObjectMapper. The features should be a name that matches a enum from com.fasterxml.jackson.databind.SerializationFeature, com.fasterxml.jackson.databind.DeserializationFeature, or com.fasterxml.jackson.databind.MapperFeature Multiple features can be separated by comma. allowUnmarshallType Boolean If enabled then Jackson is allowed to attempt to use the CamelJacksonUnmarshalType header during the unmarshalling. This should only be enabled when desired to be used. contentTypeHeader Boolean Whether the data format should set the Content-Type header with the type from the data format. For example application/xml for data formats marshalling to XML, or application/json for data formats marshalling to JSON. 43.2.1. Using Jackson XML in Spring DSL When using Data Format in Spring DSL you need to declare the data formats first. This is done in the DataFormats XML tag. <dataFormats> <!-- here we define a Xml data format with the id jack and that it should use the TestPojo as the class type when doing unmarshal. The unmarshalType is optional, if not provided Camel will use a Map as the type --> <jacksonxml id="jack" unmarshalType="org.apache.camel.component.jacksonxml.TestPojo"/> </dataFormats> And then you can refer to this id in the route: <route> <from uri="direct:back"/> <unmarshal><custom ref="jack"/></unmarshal> <to uri="mock:reverse"/> </route> 43.2.2. Excluding POJO fields from marshalling When marshalling a POJO to XML you might want to exclude certain fields from the XML output. With Jackson you can use JSON views to accomplish this. First create one or more marker classes. Use the marker classes with the @JsonView annotation to include/exclude certain fields. The annotation also works on getters. Finally use the Camel JacksonXMLDataFormat to marshall the above POJO to XML. Note that the weight field is missing in the resulting XML: <pojo age="30" weight="70"/> 43.3. Include/Exclude fields using the jsonView attribute with `JacksonXML`DataFormat As an example of using this attribute you can instead of: JacksonXMLDataFormat ageViewFormat = new JacksonXMLDataFormat(TestPojoView.class, Views.Age.class); from("direct:inPojoAgeView"). marshal(ageViewFormat); Directly specify your JSON view inside the Java DSL as: from("direct:inPojoAgeView"). marshal().jacksonxml(TestPojoView.class, Views.Age.class); And the same in XML DSL: <from uri="direct:inPojoAgeView"/> <marshal> <jacksonxml unmarshalType="org.apache.camel.component.jacksonxml.TestPojoView" jsonView="org.apache.camel.component.jacksonxml.ViewsUSDAge"/> </marshal> 43.4. Setting serialization include option If you want to marshal a pojo to XML, and the pojo has some fields with null values. And you want to skip these null values, then you need to set either an annotation on the pojo, @JsonInclude(Include.NON_NULL) public class MyPojo { ... } But this requires you to include that annotation in your pojo source code. You can also configure the Camel JacksonXMLDataFormat to set the include option, as shown below: JacksonXMLDataFormat format = new JacksonXMLDataFormat(); format.setInclude("NON_NULL"); Or from XML DSL you configure this as <dataFormats> <jacksonxml id="jacksonxml" include="NON_NULL"/> </dataFormats> 43.5. Unmarshalling from XML to POJO with dynamic class name If you use jackson to unmarshal XML to POJO, then you can now specify a header in the message that indicate which class name to unmarshal to. The header has key CamelJacksonUnmarshalType if that header is present in the message, then Jackson will use that as FQN for the POJO class to unmarshal the XML payload as. JacksonDataFormat format = new JacksonDataFormat(); format.setAllowJmsType(true); Or from XML DSL you configure this as <dataFormats> <jacksonxml id="jacksonxml" allowJmsType="true"/> </dataFormats> 43.6. Unmarshalling from XML to List<Map> or List<pojo> If you are using Jackson to unmarshal XML to a list of map/pojo, you can now specify this by setting useList="true" or use the org.apache.camel.component.jacksonxml.ListJacksonXMLDataFormat . For example with Java you can do as shown below: JacksonXMLDataFormat format = new ListJacksonXMLDataFormat(); // or JacksonXMLDataFormat format = new JacksonXMLDataFormat(); format.useList(); // and you can specify the pojo class type also format.setUnmarshalType(MyPojo.class); And if you use XML DSL then you configure to use list using useList attribute as shown below: <dataFormats> <jacksonxml id="jack" useList="true"/> </dataFormats> And you can specify the pojo type also <dataFormats> <jacksonxml id="jack" useList="true" unmarshalType="com.foo.MyPojo"/> </dataFormats> 43.7. Using custom Jackson modules You can use custom Jackson modules by specifying the class names of those using the moduleClassNames option as shown below. <dataFormats> <jacksonxml id="jack" useList="true" unmarshalType="com.foo.MyPojo" moduleClassNames="com.foo.MyModule,com.foo.MyOtherModule"/> </dataFormats> When using moduleClassNames then the custom jackson modules are not configured, by created using default constructor and used as-is. If a custom module needs any custom configuration, then an instance of the module can be created and configured, and then use modulesRefs to refer to the module as shown below: <bean id="myJacksonModule" class="com.foo.MyModule"> ... // configure the module as you want </bean> <dataFormats> <jacksonxml id="jacksonxml" useList="true" unmarshalType="com.foo.MyPojo" moduleRefs="myJacksonModule"/> </dataFormats> 43.8. Enabling or disable features using Jackson Jackson has a number of features you can enable or disable, which its ObjectMapper uses. For example to disable failing on unknown properties when marshalling, you can configure this using the disableFeatures: <dataFormats> <jacksonxml id="jacksonxml" unmarshalType="com.foo.MyPojo" disableFeatures="FAIL_ON_UNKNOWN_PROPERTIES"/> </dataFormats> You can disable multiple features by separating the values using comma. The values for the features must be the name of the enums from Jackson from the following enum classes com.fasterxml.jackson.databind.SerializationFeature com.fasterxml.jackson.databind.DeserializationFeature com.fasterxml.jackson.databind.MapperFeature To enable a feature use the enableFeatures options instead. From Java code you can use the type safe methods from camel-jackson module: JacksonDataFormat df = new JacksonDataFormat(MyPojo.class); df.disableFeature(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES); df.disableFeature(DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES); 43.9. Converting Maps to POJO using Jackson Jackson ObjectMapper can be used to convert maps to POJO objects. Jackson component comes with the data converter that can be used to convert java.util.Map instance to non-String, non-primitive and non-Number objects. Map<String, Object> invoiceData = new HashMap<String, Object>(); invoiceData.put("netValue", 500); producerTemplate.sendBody("direct:mapToInvoice", invoiceData); ... // Later in the processor Invoice invoice = exchange.getIn().getBody(Invoice.class); If there is a single ObjectMapper instance available in the Camel registry, it will used by the converter to perform the conversion. Otherwise the default mapper will be used. 43.10. Formatted XML marshalling (pretty-printing) Using the prettyPrint option one can output a well formatted XML while marshalling: <dataFormats> <jacksonxml id="jack" prettyPrint="true"/> </dataFormats> And in Java DSL: from("direct:inPretty").marshal().jacksonxml(true); Please note that there are 5 different overloaded jacksonxml() DSL methods which support the prettyPrint option in combination with other settings for unmarshalType , jsonView etc. 43.11. Spring Boot Auto-Configuration The component supports 16 options, which are listed below. Name Description Default Type camel.dataformat.jacksonxml.allow-jms-type Used for JMS users to allow the JMSType header from the JMS spec to specify a FQN classname to use to unmarshal to. false Boolean camel.dataformat.jacksonxml.allow-unmarshall-type If enabled then Jackson is allowed to attempt to use the CamelJacksonUnmarshalType header during the unmarshalling. This should only be enabled when desired to be used. false Boolean camel.dataformat.jacksonxml.collection-type Refers to a custom collection type to lookup in the registry to use. This option should rarely be used, but allows to use different collection types than java.util.Collection based as default. String camel.dataformat.jacksonxml.content-type-header Whether the data format should set the Content-Type header with the type from the data format. For example application/xml for data formats marshalling to XML, or application/json for data formats marshalling to JSON. true Boolean camel.dataformat.jacksonxml.disable-features Set of features to disable on the Jackson com.fasterxml.jackson.databind.ObjectMapper. The features should be a name that matches a enum from com.fasterxml.jackson.databind.SerializationFeature, com.fasterxml.jackson.databind.DeserializationFeature, or com.fasterxml.jackson.databind.MapperFeature Multiple features can be separated by comma. String camel.dataformat.jacksonxml.enable-features Set of features to enable on the Jackson com.fasterxml.jackson.databind.ObjectMapper. The features should be a name that matches a enum from com.fasterxml.jackson.databind.SerializationFeature, com.fasterxml.jackson.databind.DeserializationFeature, or com.fasterxml.jackson.databind.MapperFeature Multiple features can be separated by comma. String camel.dataformat.jacksonxml.enable-jaxb-annotation-module Whether to enable the JAXB annotations module when using jackson. When enabled then JAXB annotations can be used by Jackson. false Boolean camel.dataformat.jacksonxml.enabled Whether to enable auto configuration of the jacksonxml data format. This is enabled by default. Boolean camel.dataformat.jacksonxml.include If you want to marshal a pojo to JSON, and the pojo has some fields with null values. And you want to skip these null values, you can set this option to NON_NULL. String camel.dataformat.jacksonxml.json-view When marshalling a POJO to JSON you might want to exclude certain fields from the JSON output. With Jackson you can use JSON views to accomplish this. This option is to refer to the class which has JsonView annotations. String camel.dataformat.jacksonxml.module-class-names To use custom Jackson modules com.fasterxml.jackson.databind.Module specified as a String with FQN class names. Multiple classes can be separated by comma. String camel.dataformat.jacksonxml.module-refs To use custom Jackson modules referred from the Camel registry. Multiple modules can be separated by comma. String camel.dataformat.jacksonxml.pretty-print To enable pretty printing output nicely formatted. Is by default false. false Boolean camel.dataformat.jacksonxml.unmarshal-type Class name of the java type to use when unmarshalling. String camel.dataformat.jacksonxml.use-list To unmarshal to a List of Map or a List of Pojo. false Boolean camel.dataformat.jacksonxml.xml-mapper Lookup and use the existing XmlMapper with the given id. String | [
"from(\"activemq:My.Queue\"). unmarshal().jacksonxml(). to(\"mqseries:Another.Queue\");",
"<dependency> <groupId>org.apache.camel.springboot</groupId> <artifactId>camel-jacksonxml-starter</artifactId> </dependency>",
"<dataFormats> <!-- here we define a Xml data format with the id jack and that it should use the TestPojo as the class type when doing unmarshal. The unmarshalType is optional, if not provided Camel will use a Map as the type --> <jacksonxml id=\"jack\" unmarshalType=\"org.apache.camel.component.jacksonxml.TestPojo\"/> </dataFormats>",
"<route> <from uri=\"direct:back\"/> <unmarshal><custom ref=\"jack\"/></unmarshal> <to uri=\"mock:reverse\"/> </route>",
"<pojo age=\"30\" weight=\"70\"/>",
"JacksonXMLDataFormat ageViewFormat = new JacksonXMLDataFormat(TestPojoView.class, Views.Age.class); from(\"direct:inPojoAgeView\"). marshal(ageViewFormat);",
"from(\"direct:inPojoAgeView\"). marshal().jacksonxml(TestPojoView.class, Views.Age.class);",
"<from uri=\"direct:inPojoAgeView\"/> <marshal> <jacksonxml unmarshalType=\"org.apache.camel.component.jacksonxml.TestPojoView\" jsonView=\"org.apache.camel.component.jacksonxml.ViewsUSDAge\"/> </marshal>",
"@JsonInclude(Include.NON_NULL) public class MyPojo { }",
"JacksonXMLDataFormat format = new JacksonXMLDataFormat(); format.setInclude(\"NON_NULL\");",
"<dataFormats> <jacksonxml id=\"jacksonxml\" include=\"NON_NULL\"/> </dataFormats>",
"For JMS end users there is the JMSType header from the JMS spec that indicates that also. To enable support for JMSType you would need to turn that on, on the jackson data format as shown:",
"JacksonDataFormat format = new JacksonDataFormat(); format.setAllowJmsType(true);",
"<dataFormats> <jacksonxml id=\"jacksonxml\" allowJmsType=\"true\"/> </dataFormats>",
"JacksonXMLDataFormat format = new ListJacksonXMLDataFormat(); // or JacksonXMLDataFormat format = new JacksonXMLDataFormat(); format.useList(); // and you can specify the pojo class type also format.setUnmarshalType(MyPojo.class);",
"<dataFormats> <jacksonxml id=\"jack\" useList=\"true\"/> </dataFormats>",
"<dataFormats> <jacksonxml id=\"jack\" useList=\"true\" unmarshalType=\"com.foo.MyPojo\"/> </dataFormats>",
"<dataFormats> <jacksonxml id=\"jack\" useList=\"true\" unmarshalType=\"com.foo.MyPojo\" moduleClassNames=\"com.foo.MyModule,com.foo.MyOtherModule\"/> </dataFormats>",
"<bean id=\"myJacksonModule\" class=\"com.foo.MyModule\"> ... // configure the module as you want </bean> <dataFormats> <jacksonxml id=\"jacksonxml\" useList=\"true\" unmarshalType=\"com.foo.MyPojo\" moduleRefs=\"myJacksonModule\"/> </dataFormats>",
"Multiple modules can be specified separated by comma, such as moduleRefs=\"myJacksonModule,myOtherModule\"",
"<dataFormats> <jacksonxml id=\"jacksonxml\" unmarshalType=\"com.foo.MyPojo\" disableFeatures=\"FAIL_ON_UNKNOWN_PROPERTIES\"/> </dataFormats>",
"JacksonDataFormat df = new JacksonDataFormat(MyPojo.class); df.disableFeature(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES); df.disableFeature(DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES);",
"Map<String, Object> invoiceData = new HashMap<String, Object>(); invoiceData.put(\"netValue\", 500); producerTemplate.sendBody(\"direct:mapToInvoice\", invoiceData); // Later in the processor Invoice invoice = exchange.getIn().getBody(Invoice.class);",
"<dataFormats> <jacksonxml id=\"jack\" prettyPrint=\"true\"/> </dataFormats>",
"from(\"direct:inPretty\").marshal().jacksonxml(true);"
] | https://docs.redhat.com/en/documentation/red_hat_build_of_apache_camel/4.0/html/red_hat_build_of_apache_camel_for_spring_boot_reference/csb-camel-jacksonxml-dataformat-starter |
C.3. User Interface Plugin-related Files and Their Locations | C.3. User Interface Plugin-related Files and Their Locations Table C.1. UI Plugin-related Files and their Locations File Location Remarks Plug-in descriptor files (meta-data) /usr/share/ovirt-engine/ui-plugins/my-plugin.json Plug-in user configuration files /etc/ovirt-engine/ui-plugins/my-plugin-config.json Plug-in resource files /usr/share/ovirt-engine/ui-plugins/<resourcePath>/PluginHostPage.html <resourcePath> is defined by the corresponding attribute in the plug-in descriptor. | null | https://docs.redhat.com/en/documentation/red_hat_virtualization/4.3/html/administration_guide/ui_plugin-related_files_and_their_locations |
Chapter 7. Scheduling policies for RHEL for Real Time | Chapter 7. Scheduling policies for RHEL for Real Time In real-time, the scheduler is the kernel component that determines the runnable thread to run. Each thread has an associated scheduling policy and a static scheduling priority, known as sched_priority . The scheduling is preemptive and therefore the currently running thread stops when a thread with a higher static priority gets ready to run. The running thread then returns to the waitlist for its static priority. All Linux threads have one of the following scheduling policies: SCHED_OTHER or SCHED_NORMAL : is the default policy. SCHED_BATCH : is similar to SCHED_OTHER , but with incremental orientation. SCHED_IDLE : is the policy with lower priority than SCHED_OTHER . SCHED_FIFO : is the first in and first out real-time policy. SCHED_RR : is the round-robin real-time policy. SCHED_DEADLINE : is a scheduler policy to prioritize tasks according to the job deadline. The job with the earliest absolute deadline runs first. 7.1. Scheduler policies The real-time threads have higher priority than the standard threads. The policies have scheduling priority values that range from the minimum value of 1 to the maximum value of 99. The following policies are critical to real-time: SCHED_OTHER or SCHED_NORMAL policy This is the default scheduling policy for Linux threads. It has a dynamic priority that is changed by the system based on the characteristics of the thread. SCHED_OTHER threads have nice values between -20, which is the highest priority and 19, which is the lowest priority. The default nice value for SCHED_OTHER threads is 0. SCHED_FIFO policy Threads with SCHED_FIFO run with higher priority over SCHED_OTHER tasks. Instead of using nice values, SCHED_FIFO uses a fixed priority between 1, which is the lowest and 99, which is the highest. A SCHED_FIFO thread with a priority of 1 always schedules first over a SCHED_OTHER thread. SCHED_RR policy The SCHED_RR policy is similar to the SCHED_FIFO policy. The threads of equal priority are scheduled in a round-robin fashion. SCHED_FIFO and SCHED_RR threads run until one of the following events occurs: The thread goes to sleep or waits for an event. A higher-priority real-time thread gets ready to run. Unless one of the above events occurs, the threads run indefinitely on the specified processor, while the lower-priority threads remain in the queue waiting to run. This might cause the system service threads to be resident and prevent being swapped out and fail the filesystem data flushing. SCHED_DEADLINE policy The SCHED_DEADLINE policy specifies the timing requirements. It schedules each task according to the task's deadline. The task with the earliest deadline first (EDF) schedule runs first. The kernel requires runtime⇐deadline⇐period to be true. The relation between the required options is runtime⇐deadline⇐period . 7.2. Parameters for SCHED_DEADLINE policy Each SCHED_DEADLINE task is characterized by period , runtime , and deadline parameters. The values for these parameters are integers of nanoseconds. Table 7.1. SCHED_DEADLINE parameters Parameter Description period period is the activation pattern of a real-time task. For example, if a video processing task has 60 frames per second to process, a new frame is queued for service every 16 milliseconds. Therefore, the period is 16 milliseconds. runtime runtime is the amount of CPU execution time allotted to the task to produce an output. In real-time, the maximum execution time, also known as "Worst Case Execution Time" (WCET) is the runtime . For example, if a video processing tool can take, in the worst case, five milliseconds to process an image, the runtime is five milliseconds. deadline deadline is the maximum time for the output to be produced. For example, if a task needs to deliver the processed frame within ten milliseconds, the deadline is ten milliseconds. | null | https://docs.redhat.com/en/documentation/red_hat_enterprise_linux_for_real_time/9/html/understanding_rhel_for_real_time/assembly_scheduling-policies-for-rhel-for-real-time_understanding-rhel-for-real-time-core-concepts |
5.286. rpm | 5.286. rpm 5.286.1. RHBA-2012:0909 - rpm bug fix and enhancement update Updated rpm packages that fix multiple bugs and add various enhancements are now available for Red Hat Enterprise Linux 6. The RPM Package Manager (RPM) is a command-line driven package management system capable of installing, uninstalling, verifying, querying, and updating software packages. Bug Fixes BZ# 799317 Previously, presence of ELF files prevented cross-architecture obsoletion of packages on multi-arch systems, causing file conflicts when a supposedly obsoleted package was not removed. With this update, obsoletes are now processed for all matching package names regardless of their contents, allowing scenarios like eliminating no longer needed 32-bit package variant on 64-bit multi-arch systems to work as expected. BZ# 746190 Previously, a bug in execution of package scriptlets utilizing RPM's embedded Lua interpreter could have caused RPM's working directory to change inadvertently, resulting in a failure to install remaining local packages in the transaction unless absolute paths were used to address the packages on the "rpm" or "yum" command line. With this update, the Lua scriptlet execution now always saves and restores the current working directory, ensuring correct operation regardless of whether absolute or relative paths to packages are used. BZ# 785236 Previously, the "-D" shortcut option for "--define" was incorrectly taken as a shortcut for "--predefine", which led to incorrect macro evaluation when attempting to override macros from system configuration. The "-D" shortcut option now equals "--define" as intended and documented. BZ# 768516 Previously, RPM's "--last" query format output could have been ambiguous on multi-arch systems such as AMD64/Intel 64 as package architecture was omitted. Package's architecture is now included in "--last" output as well, making it non-ambiguous and also consistent with the default query output format. BZ# 664427 As the build dependencies recorded in source packages can vary depending on the architecture where the source packages happened to be generated, using the yum-builddep utility on a source package does not always report correct results. RPM's Python bindings have now been enhanced to permit yum-builddep to operate on spec files directly, ensuring that the correct build dependency information for the local system is used. BZ# 752119 Previously, certain multi-line brace constructs could have caused the automatic Perl dependency generator script to miss dependencies from pe. The generator has now been updated to properly handle these situations. Enhancements BZ# 714678 When building packages on file systems with a very high number of files, the on-disk inode numbers could have been truncated in RPM's 32bit-integer-based hardlink tracking, resulting in incorrect package generation and, consequently, installation. RPM now uses per-package virtual numbering for hardlink tracking to eliminate the possibility of truncation, ensuring correct operation regardless of physical inode numbers at package build time, in a backwards compatible way. BZ# 736960 Previously, RPM ignored any exit codes from %pretrans package scriptlets. This was inconsistent with semantics of other scriptlets and prevented the possibility of early abort of package installation before the transaction really starts. RPM now treats %pretrans failure similarly to that of %pre: the package with failing %pretrans scriptlet is not installed at all. BZ# 761000 Packages for Fedora 17 or later require a special rpmlib() dependency provided by RPM to track the /usr merge that was completed in Fedora 17, otherwise it will no be possible to use, for example, mock chroot to install and build packages for that distribution. This special tracking dependency has been added to RPM now to allow Red Hat Enterprise Linux 6 to be used as a host for building packages for these newer Fedora versions. All users of RPM are advised to upgrade to these updated packages, which fix these bugs and add these enhancements. | null | https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/6.3_technical_notes/rpm |
Chapter 1. About disconnected installation mirroring | Chapter 1. About disconnected installation mirroring You can use a mirror registry to ensure that your clusters only use container images that satisfy your organizational controls on external content. Before you install a cluster on infrastructure that you provision in a restricted network, you must mirror the required container images into that environment. To mirror container images, you must have a registry for mirroring. 1.1. Creating a mirror registry If you already have a container image registry, such as Red Hat Quay, you can use it as your mirror registry. If you do not already have a registry, you can create a mirror registry using the mirror registry for Red Hat OpenShift . 1.2. Mirroring images for a disconnected installation You can use one of the following procedures to mirror your OpenShift Container Platform image repository to your mirror registry: Mirroring images for a disconnected installation Mirroring images for a disconnected installation using the oc-mirror plugin | null | https://docs.redhat.com/en/documentation/openshift_container_platform/4.15/html/disconnected_installation_mirroring/installing-mirroring-disconnected-about |
Chapter 21. Debugging a Crashed Application | Chapter 21. Debugging a Crashed Application Sometimes, it is not possible to debug an application directly. In these situations, you can collect information about the application at the moment of its termination and analyze it afterwards. 21.1. Core Dumps This section describes what a core dump is and how to use it. Prerequisites Understanding of debugging information Description A core dump is a copy of a part of the application's memory at the moment the application stopped working, stored in the ELF format. It contains all the application's internal variables and stack, which enables inspection of the application's final state. When augmented with the respective executable file and debugging information, it is possible to analyze a core dump file with a debugger in a way similar to analyzing a running program. The Linux operating system kernel can record core dumps automatically, if this functionality is enabled. Alternatively, you can send a signal to any running application to generate a core dump regardless of its actual state. Warning Some limits might affect the ability to generate a core dump. 21.2. Recording Application Crashes with Core Dumps To record application crashes, set up core dump saving and add information about the system. Procedure Enable core dumps. Edit the file /etc/systemd/system.conf and change the line containing DefaultLimitCORE to the following: Reboot the system: Remove the limits for core dump sizes: To reverse this change, run the command with the value 0 instead of unlimited . When an application crashes, a core dump is generated. The default location for core dumps is the application's working directory at the time of the crash. Create an SOS report to provide additional information about the system: This creates a tar archive containing information about your system, such as copies of configuration files. Transfer the core dump and the SOS report to the computer where the debugging will take place. Transfer the executable file, too, if it is known. Important If the executable file is not known, subsequent analysis of the core file will identify it. Optional: Remove the core dump and SOS report after transferring them to free up disk space. Additional Resources Knowledgebase article - How to enable core file dumps when an application crashes or segmentation faults Knowledgebase article - What is a sosreport and how to create one in Red Hat Enterprise Linux 4.6 and later? 21.3. Inspecting Application Crash States with Core Dumps Prerequisites You have a core dump file and SOS report GDB and elfutils are installed on the system Procedure To identify the executable file where the crash occurred, run the eu-unstrip command with the core dump file: The output contains details for each module on one line, separated by spaces. The information is listed in this order: The memory address where the module was mapped The build-id of the module and where in the memory it was found The module's executable file name, displayed as - when unknown, or as . when the module has not been loaded from a file The source of debugging information, displayed as a file name when available, as . when contained in the executable file itself, or as - when not present at all The shared library name ( soname ), or [exe] for the main module In this example, the important details are the file name /usr/bin/sleep and the build-id 2818b2009547f780a5639c904cded443e564973e on the line containing the text [exe] . With this information, you can identify the executable file required for analyzing the core dump. Get the executable file that crashed. If possible, copy it from the system where the crash occurred. Use the file name extracted from the core file. Alternatively, use an identical executable file on your system. Each executable file built on Red Hat Enterprise Linux contains a note with a unique build-id value. Determine the build-id of the relevant locally available executable files: Use this information to match the executable file on the remote system with your local copy. The build-id of the local file and build-id listed in the core dump must match. Finally, if the application is installed from an RPM package, you can get the executable file from the package. Use the sosreport output to find the exact version of the package required. Get the shared libraries used by the executable file. Use the same steps as for the executable file. If the application is distributed as a package, load the executable file in GDB to display hints for missing debuginfo packages. For more details, see Section 20.1.4, "Getting debuginfo Packages for an Application or Library using GDB" . To examine the core file in detail, load the executable file and core dump file with GDB: Further messages about missing files and debugging information help you to identify what is missing for the debugging session. Return to the step if needed. If the debugging information is available as a file instead of a package, load this file in GDB with the symbol-file command: Replace program.debug with the actual file name. Note It might not be necessary to install the debugging information for all executable files contained in the core dump. Most of these executable files are libraries used by the application code. These libraries might not directly contribute to the problem you are analyzing, and you do not need to include debugging information for them. Use the GDB commands to inspect the state of the application at the moment it crashed. See Section 20.2, "Inspecting the Application's Internal State with GDB" . Note When analyzing a core file, GDB is not attached to a running process. Commands for controlling execution have no effect. Additional Resources Debugging with GDB - 2.1.1 Choosing Files Debugging with GDB - 18.1 Commands to Specify Files Debugging with GDB - 18.3 Debugging Information in Separate Files 21.4. Dumping Process Memory with gcore The workflow of core dump debugging enables the analysis of the offline state of the program. In some cases it is advantageous to use this workflow with a program that is still running, such as when it is hard to access the environment with the process. You can use the gcore command to dump memory of any process while it is still running. Prerequisites Understanding of core dumps GDB is installed on the system Procedure To dump a process memory using gcore : Find out the process id ( pid ). Use tools such as ps , pgrep , and top : Dump the memory of this process: This creates a file filename and dumps the process memory in it. While the memory is being dumped, the execution of the process is halted. After the core dump is finished, the process resumes normal execution. Create an SOS report to provide additional information about the system: This creates a tar archive containing information about your system, such as copies of configuration files. Transfer the program's executable file, core dump, and the SOS report to the computer where the debugging will take place. Optional: Remove the core dump and SOS report after transferring them to reclaim disk space. Additional resources Knowledgebase article - How to obtain a core file without restarting an application? 21.5. Dumping Protected Process Memory with GDB You can mark the memory of processes as not to be dumped. This can save resources and ensure additional security if the process memory contains sensitive data. Both kernel core dumps ( kdump ) and manual core dumps ( gcore , GDB) do not dump memory marked this way. In some cases, it is necessary to dump the whole contents of the process memory regardless of these protections. This procedure shows how to do this using the GDB debugger. Prerequisites Understanding of core dumps GDB is installed on the system GDB is attached to the process with protected memory Procedure Set GDB to ignore the settings in the /proc/PID/coredump_filter file: Set GDB to ignore the memory page flag VM_DONTDUMP : Dump the memory: Replace core-file with the name of the file of which you want to dump the memory. Additional Resources Debugging with GDB - 10.19 How to Produce a Core File from Your Program | [
"DefaultLimitCORE=infinity",
"shutdown -r now",
"ulimit -c unlimited",
"sosreport",
"eu-unstrip -n --core= ./core.9814 0x400000+0x207000 2818b2009547f780a5639c904cded443e564973e@0x400284 /usr/bin/sleep /usr/lib/debug/bin/sleep.debug [exe] 0x7fff26fff000+0x1000 1e2a683b7d877576970e4275d41a6aaec280795e@0x7fff26fff340 . - linux-vdso.so.1 0x35e7e00000+0x3b6000 374add1ead31ccb449779bc7ee7877de3377e5ad@0x35e7e00280 /usr/lib64/libc-2.14.90.so /usr/lib/debug/lib64/libc-2.14.90.so.debug libc.so.6 0x35e7a00000+0x224000 3ed9e61c2b7e707ce244816335776afa2ad0307d@0x35e7a001d8 /usr/lib64/ld-2.14.90.so /usr/lib/debug/lib64/ld-2.14.90.so.debug ld-linux-x86-64.so.2",
"eu-readelf -n executable_file",
"gdb -e executable_file -c core_file",
"(gdb) symbol-file program.debug",
"ps -C some-program",
"gcore -o filename pid",
"sosreport",
"(gdb) set use-coredump-filter off",
"(gdb) set dump-excluded-mappings on",
"(gdb) gcore core-file"
] | https://docs.redhat.com/en/documentation/Red_Hat_Enterprise_Linux/7/html/developer_guide/debugging-crashed-application |
Chapter 9. Migrating Directory Server 10 to Directory Server 12 | Chapter 9. Migrating Directory Server 10 to Directory Server 12 Learn about migration from Red Hat Directory Server 10 to 12, including tasks that you must perform before you start the migration. Important Red Hat supports migration only from Red Hat Directory Server 10 or 11 to version 12. To migrate Directory Server from earlier version, you must perform incremental migrations to Directory Server 10 or 11. Red Hat does not support an in-place upgrade of Directory Server 10 or 11 servers to version 12 by using the leapp upgrade tool. For migration, you can use one of the following ways: If you have a replication topology, use the replication method. If you have a disconnected topology without planned replication between Directory Server 10 and Directory Server 12, or if your database is more that 1 GB, use the export and import method. 9.1. Prerequisites The existing Directory Server installation runs on version 10 and has all available updates installed. You installed a Directory Server 12 host and created an instance on the host. 9.2. Migrating Directory Server 10 to version 12 using the replication method In a replication topology, use the replication method to migrate to Directory Server 12. Procedure On the Directory Server 12 host, enable replication, but do not create a replication agreement. For details about enabling replication, see the Configuring and managing replication in the Red Hat Directory Server 12 documentation. On the Directory Server 10 host, enable replication and create a replication agreement that points to the Directory Server 12 host. For details about enabling replication, see chapter 15 "Managing Replication" in the Red Hat Directory Server 10 Administration Guide . Important If you used a custom configuration on the Directory Server 10 host, do not replace the dse.ldif configuration file on the Directory Server 12 host with the file from versions, because the dse.ldif layout changes between versions. Instead, use the dsconf utility or the web console to add the custom configuration for each parameter and plug-in that you require. Optional: Set up further Directory Server 12 hosts with replication agreements between the Directory Server 12 hosts. Configure your clients to use only the Directory Server 12 hosts. On the Directory Server 10 host, remove the replication agreements that point to the Directory Server 12 host: Uninstall the Directory Server 10 hosts. See the chapter 4.8 "Uninstalling Directory Server" in the Red Hat Directory Server 10 Installation Guide . 9.3. Migrating Directory Server 10 to version 12 using the export and import method Use the export and import method to migrate large Directory Server environments or instances without replication. Procedure Perform the following steps on the existing Directory Server 10 host: Stop and disable the dirsrv service: Export the backend. For example, to export the userRoot database and store it in the /tmp/userRoot.ldif file: Copy the following files to the new Directory Server 12 host: The LDIF file userRoot.ldif that you exported in the step. The /etc/dirsrv/slapd- DS10_instance_name /schema/99user.ldif file if you use a custom schema. The /etc/dirsrv/slapd- DS10_instance_name /dse.ldif configuration file. Important Do not replace the dse.ldif configuration file on the Directory Server 12 host with the file from the Directory Server 10 host because the dse.ldif layout changes between versions. Store the dse.ldif file for the reference. If you want to migrate an instance with TLS enabled and reuse the same host name for the Directory Server 12 installation, copy: /etc/dirsrv/slapd- DS10_instance_name /cert8.db /etc/dirsrv/slapd- DS10_instance_name /key3.db /etc/dirsrv/slapd- DS10_instance_name /pin.txt If you want to use the same host name and IP on the Directory Server 12 host, disconnect the old server from the network. Perform the following steps on the new Directory Server 12 host: Optional: Configure TLS encryption: If the new installation uses a different host name than the Directory Server 10 instance, see the Enabling TLS-encrypted connections to Directory Server section in the Securing Red Hat Directory Server documentation. If you want to use the same host name as the Directory Server 10 installation: Stop the instance: Remove the Network Security Services (NSS) databases and the password file for Directory Server, if they already exist: Move the cert8.db , key3.db , and pin.txt files that you copied from the Directory Server 10 host to the /etc/dirsrv/slapd- DS12_instance_name / directory. Set the correct permissions for the NSS databases and the password file: Start the instance: If you used a custom schema, place the 99user.ldif file in the /etc/dirsrv/slapd- DS12_instance_name /schema/ directory, set appropriate permissions, and restart the instance: Place the /tmp/userRoot.ldif file you prepared on the Directory Server 10 host in the /var/lib/dirsrv/slapd- DS12_instance_name /ldif/ directory. Import the userRoot.ldif file to restore the userRoot backend with all entries: Note that Directory Server 12 can import LDIF files only from the /var/lib/dirsrv/slapd- DS12_instance_name / directory. Important If you used a custom configuration on the Directory Server 10 host, do not replace the dse.ldif configuration file on the Directory Server 12 host with the file from versions. Instead, use the dsconf utility or the web console to add the custom configuration manually for each parameter and plug-in that you require. | [
"ldapmodify -D \"cn=Directory Manager\" -W -x -p 389 -h server_ds_10.example.com dn: cn= agreement-to-DS-12-server ,cn=replica,cn=dc\\3Dexample\\2Cdc\\3Dcom,cn=mapping tree,cn=config changetype: delete",
"dsctl DS10_instance_name stop systemctl disable dirsrv@ DS10_instance_name",
"db2ldif -Z DS10_instance_name -n userRoot -a /tmp/userRoot.ldif",
"dsctl DS12_instance_name stop",
"rm /etc/dirsrv/slapd- DS12_instance_name /cert*.db /etc/dirsrv/slapd- DS12_instance_name /key*.db /etc/dirsrv/slapd- DS12_instance_name /pin.txt",
"chown dirsrv:root /etc/dirsrv/slapd- DS12_instance_name /cert8.db /etc/dirsrv/slapd- DS12_instance_name /key3.db /etc/dirsrv/slapd- DS12_instance_name /pin.txt chmod 600 /etc/dirsrv/slapd- DS12_instance_name /cert8.db /etc/dirsrv/slapd- DS12_instance_name /key3.db /etc/dirsrv/slapd- DS12_instance_name /pin.txt",
"dsctl DS12_instance_name start",
"cp /etc/dirsrv/slapd- DS10_instance_name /schema/99user.ldif /etc/dirsrv/slapd- DS12_instance_name /schema/ chmod 644 /etc/dirsrv/slapd- DS12_instance_name /schema/99user.ldif chown root:root /etc/dirsrv/slapd- DS12_instance_name /schema/99user.ldif dsctl DS12_instance_name restart",
"dsconf -D 'cn=Directory Manager' ldap:// server.example.com backend import userRoot /var/lib/dirsrv/slapd- instance_name /ldif/userRoot.ldif"
] | https://docs.redhat.com/en/documentation/red_hat_directory_server/12/html/installing_red_hat_directory_server/assembly_migrating-directory-server-10-to-directory-server-12_installing-rhds |
Chapter 14. Configuring secure communication by using the ssh and sshd RHEL System Roles | Chapter 14. Configuring secure communication by using the ssh and sshd RHEL System Roles As an administrator, you can use the sshd System Role to configure SSH servers and the ssh System Role to configure SSH clients consistently on any number of RHEL systems at the same time by using Red Hat Ansible Automation Platform. 14.1. ssh Server System Role variables In an sshd System Role playbook, you can define the parameters for the SSH configuration file according to your preferences and limitations. If you do not configure these variables, the System Role produces an sshd_config file that matches the RHEL defaults. In all cases, Booleans correctly render as yes and no in sshd configuration. You can define multi-line configuration items using lists. For example: renders as: Variables for the sshd System Role sshd_enable If set to False , the role is completely disabled. Defaults to True . sshd_skip_defaults If set to True , the System Role does not apply default values. Instead, you specify the complete set of configuration defaults by using either the sshd dict, or sshd_Key variables. Defaults to False . sshd_manage_service If set to False , the service is not managed, which means it is not enabled on boot and does not start or reload. Defaults to True except when running inside a container or AIX, because the Ansible service module does not currently support enabled for AIX. sshd_allow_reload If set to False , sshd does not reload after a change of configuration. This can help with troubleshooting. To apply the changed configuration, reload sshd manually. Defaults to the same value as sshd_manage_service except on AIX, where sshd_manage_service defaults to False but sshd_allow_reload defaults to True . sshd_install_service If set to True , the role installs service files for the sshd service. This overrides files provided in the operating system. Do not set to True unless you are configuring a second instance and you also change the sshd_service variable. Defaults to False . The role uses the files pointed by the following variables as templates: sshd_service This variable changes the sshd service name, which is useful for configuring a second sshd service instance. sshd A dict that contains configuration. For example: sshd _OptionName You can define options by using simple variables consisting of the sshd_ prefix and the option name instead of a dict. The simple variables override values in the sshd dict.. For example: sshd_match and sshd_match_1 to sshd_match_9 A list of dicts or just a dict for a Match section. Note that these variables do not override match blocks as defined in the sshd dict. All of the sources will be reflected in the resulting configuration file. Secondary variables for the sshd System Role You can use these variables to override the defaults that correspond to each supported platform. sshd_packages You can override the default list of installed packages using this variable. sshd_config_owner , sshd_config_group , and sshd_config_mode You can set the ownership and permissions for the openssh configuration file that this role produces using these variables. sshd_config_file The path where this role saves the openssh server configuration produced. sshd_config_namespace The default value of this variable is null, which means that the role defines the entire content of the configuration file including system defaults. Alternatively, you can use this variable to invoke this role from other roles or from multiple places in a single playbook on systems that do not support drop-in directory. The sshd_skip_defaults variable is ignored and no system defaults are used in this case. When this variable is set, the role places the configuration that you specify to configuration snippets in an existing configuration file under the given namespace. If your scenario requires applying the role several times, you need to select a different namespace for each application. Note Limitations of the openssh configuration file still apply. For example, only the first option specified in a configuration file is effective for most of the configuration options. Technically, the role places snippets in "Match all" blocks, unless they contain other match blocks, to ensure they are applied regardless of the match blocks in the existing configuration file. This allows configuring any non-conflicting options from different roles invocations. sshd_binary The path to the sshd executable of openssh . sshd_service The name of the sshd service. By default, this variable contains the name of the sshd service that the target platform uses. You can also use it to set the name of the custom sshd service when the role uses the sshd_install_service variable. sshd_verify_hostkeys Defaults to auto . When set to auto , this lists all host keys that are present in the produced configuration file, and generates any paths that are not present. Additionally, permissions and file owners are set to default values. This is useful if the role is used in the deployment stage to make sure the service is able to start on the first attempt. To disable this check, set this variable to an empty list [] . sshd_hostkey_owner , sshd_hostkey_group , sshd_hostkey_mode Use these variables to set the ownership and permissions for the host keys from sshd_verify_hostkeys . sshd_sysconfig On RHEL-based systems, this variable configures additional details of the sshd service. If set to true , this role manages also the /etc/sysconfig/sshd configuration file based on the following configuration. Defaults to false . sshd_sysconfig_override_crypto_policy In RHEL, when set to true , this variable overrides the system-wide crypto policy. Defaults to false . sshd_sysconfig_use_strong_rng On RHEL-based systems, this variable can force sshd to reseed the openssl random number generator with the number of bytes given as the argument. The default is 0 , which disables this functionality. Do not turn this on if the system does not have a hardware random number generator. 14.2. Configuring OpenSSH servers using the sshd System Role You can use the sshd System Role to configure multiple SSH servers by running an Ansible playbook. Note You can use the sshd System Role with other System Roles that change SSH and SSHD configuration, for example the Identity Management RHEL System Roles. To prevent the configuration from being overwritten, make sure that the sshd role uses namespaces (RHEL 8 and earlier versions) or a drop-in directory (RHEL 9). Prerequisites Access and permissions to one or more managed nodes , which are systems you want to configure with the sshd System Role. Access and permissions to a control node , which is a system from which Red Hat Ansible Core configures other systems. On the control node: The ansible-core and rhel-system-roles packages are installed. Important RHEL 8.0-8.5 provided access to a separate Ansible repository that contains Ansible Engine 2.9 for automation based on Ansible. Ansible Engine contains command-line utilities such as ansible , ansible-playbook , connectors such as docker and podman , and many plugins and modules. For information about how to obtain and install Ansible Engine, see the How to download and install Red Hat Ansible Engine Knowledgebase article. RHEL 8.6 and 9.0 have introduced Ansible Core (provided as the ansible-core package), which contains the Ansible command-line utilities, commands, and a small set of built-in Ansible plugins. RHEL provides this package through the AppStream repository, and it has a limited scope of support. For more information, see the Scope of support for the Ansible Core package included in the RHEL 9 and RHEL 8.6 and later AppStream repositories Knowledgebase article. An inventory file which lists the managed nodes. Procedure Copy the example playbook for the sshd System Role: # cp /usr/share/doc/rhel-system-roles/sshd/example-root-login-playbook.yml path/custom-playbook.yml Open the copied playbook by using a text editor, for example: # vim path/custom-playbook.yml --- - hosts: all tasks: - name: Configure sshd to prevent root and password login except from particular subnet include_role: name: rhel-system-roles.sshd vars: sshd: # root login and password login is enabled only from a particular subnet PermitRootLogin: no PasswordAuthentication: no Match: - Condition: "Address 192.0.2.0/24" PermitRootLogin: yes PasswordAuthentication: yes The playbook configures the managed node as an SSH server configured so that: password and root user login is disabled password and root user login is enabled only from the subnet 192.0.2.0/24 You can modify the variables according to your preferences. For more details, see SSH Server System Role variables . Optional: Verify playbook syntax. # ansible-playbook --syntax-check path/custom-playbook.yml Run the playbook on your inventory file: # ansible-playbook -i inventory_file path/custom-playbook.yml ... PLAY RECAP ************************************************** localhost : ok=12 changed=2 unreachable=0 failed=0 skipped=10 rescued=0 ignored=0 Verification Log in to the SSH server: Where: user1 is a user on the SSH server. 10.1.1.1 is the IP address of the SSH server. Check the contents of the sshd_config file on the SSH server: Check that you can connect to the server as root from the 192.0.2.0/24 subnet: Determine your IP address: If the IP address is within the 192.0.2.1 - 192.0.2.254 range, you can connect to the server. Connect to the server as root : Additional resources /usr/share/doc/rhel-system-roles/sshd/README.md file. ansible-playbook(1) man page. 14.3. ssh System Role variables In an ssh System Role playbook, you can define the parameters for the client SSH configuration file according to your preferences and limitations. If you do not configure these variables, the System Role produces a global ssh_config file that matches the RHEL defaults. In all cases, booleans correctly render as yes or no in ssh configuration. You can define multi-line configuration items using lists. For example: renders as: Note The configuration options are case sensitive. Variables for the ssh System Role ssh_user You can define an existing user name for which the System Role modifies user-specific configuration. The user-specific configuration is saved in ~/.ssh/config of the given user. The default value is null, which modifies global configuration for all users. ssh_skip_defaults Defaults to auto . If set to auto , the System Role writes the system-wide configuration file /etc/ssh/ssh_config and keeps the RHEL defaults defined there. Creating a drop-in configuration file, for example by defining the ssh_drop_in_name variable, automatically disables the ssh_skip_defaults variable. ssh_drop_in_name Defines the name for the drop-in configuration file, which is placed in the system-wide drop-in directory. The name is used in the template /etc/ssh/ssh_config.d/{ssh_drop_in_name}.conf to reference the configuration file to be modified. If the system does not support drop-in directory, the default value is null. If the system supports drop-in directories, the default value is 00-ansible . Warning If the system does not support drop-in directories, setting this option will make the play fail. The suggested format is NN-name , where NN is a two-digit number used for ordering the configuration files and name is any descriptive name for the content or the owner of the file. ssh A dict that contains configuration options and their respective values. ssh _OptionName You can define options by using simple variables consisting of the ssh_ prefix and the option name instead of a dict. The simple variables override values in the ssh dict. ssh_additional_packages This role automatically installs the openssh and openssh-clients packages, which are needed for the most common use cases. If you need to install additional packages, for example, openssh-keysign for host-based authentication, you can specify them in this variable. ssh_config_file The path to which the role saves the configuration file produced. Default value: If the system has a drop-in directory, the default value is defined by the template /etc/ssh/ssh_config.d/{ssh_drop_in_name}.conf . If the system does not have a drop-in directory, the default value is /etc/ssh/ssh_config . if the ssh_user variable is defined, the default value is ~/.ssh/config . ssh_config_owner , ssh_config_group , ssh_config_mode The owner, group and modes of the created configuration file. By default, the owner of the file is root:root , and the mode is 0644 . If ssh_user is defined, the mode is 0600 , and the owner and group are derived from the user name specified in the ssh_user variable. 14.4. Configuring OpenSSH clients using the ssh System Role You can use the ssh System Role to configure multiple SSH clients by running an Ansible playbook. Note You can use the ssh System Role with other System Roles that change SSH and SSHD configuration, for example the Identity Management RHEL System Roles. To prevent the configuration from being overwritten, make sure that the ssh role uses a drop-in directory (default from RHEL 8). Prerequisites Access and permissions to one or more managed nodes , which are systems you want to configure with the ssh System Role. Access and permissions to a control node , which is a system from which Red Hat Ansible Core configures other systems. On the control node: The ansible-core and rhel-system-roles packages are installed. Important RHEL 8.0-8.5 provided access to a separate Ansible repository that contains Ansible Engine 2.9 for automation based on Ansible. Ansible Engine contains command-line utilities such as ansible , ansible-playbook , connectors such as docker and podman , and many plugins and modules. For information about how to obtain and install Ansible Engine, see the How to download and install Red Hat Ansible Engine Knowledgebase article. RHEL 8.6 and 9.0 have introduced Ansible Core (provided as the ansible-core package), which contains the Ansible command-line utilities, commands, and a small set of built-in Ansible plugins. RHEL provides this package through the AppStream repository, and it has a limited scope of support. For more information, see the Scope of support for the Ansible Core package included in the RHEL 9 and RHEL 8.6 and later AppStream repositories Knowledgebase article. An inventory file which lists the managed nodes. Procedure Create a new playbook.yml file with the following content: --- - hosts: all tasks: - name: "Configure ssh clients" include_role: name: rhel-system-roles.ssh vars: ssh_user: root ssh: Compression: true GSSAPIAuthentication: no ControlMaster: auto ControlPath: ~/.ssh/.cm%C Host: - Condition: example Hostname: example.com User: user1 ssh_ForwardX11: no This playbook configures the root user's SSH client preferences on the managed nodes with the following configurations: Compression is enabled. ControlMaster multiplexing is set to auto . The example alias for connecting to the example.com host is user1 . The example host alias is created, which represents a connection to the example.com host the with user1 user name. X11 forwarding is disabled. Optionally, you can modify these variables according to your preferences. For more details, see ssh System Role variables . Optional: Verify playbook syntax. Run the playbook on your inventory file: Verification Verify that the managed node has the correct configuration by opening the SSH configuration file in a text editor, for example: After application of the example playbook shown above, the configuration file should have the following content: 14.5. Using the sshd System Role for non-exclusive configuration Normally, applying the sshd System Role overwrites the entire configuration. This may be problematic if you have previously adjusted the configuration, for example with a different System Role or playbook. To apply the sshd System Role for only selected configuration options while keeping other options in place, you can use the non-exclusive configuration. In RHEL 8 and earlier, you can apply the non-exclusive configuration with a configuration snippet. Prerequisites Access and permissions to one or more managed nodes , which are systems you want to configure with the sshd System Role. Access and permissions to a control node , which is a system from which Red Hat Ansible Core configures other systems. On the control node: The ansible-core package is installed. An inventory file which lists the managed nodes. A playbook for a different RHEL System Role. Procedure Add a configuration snippet with the sshd_config_namespace variable to the playbook: --- - hosts: all tasks: - name: <Configure SSHD to accept some useful environment variables> include_role: name: rhel-system-roles.sshd vars: sshd_config_namespace: <my-application> sshd: # Environment variables to accept AcceptEnv: LANG LS_COLORS EDITOR When you apply the playbook to the inventory, the role adds the following snippet, if not already present, to the /etc/ssh/sshd_config file. # BEGIN sshd system role managed block: namespace <my-application> Match all AcceptEnv LANG LS_COLORS EDITOR # END sshd system role managed block: namespace <my-application> Verification Optional: Verify playbook syntax. Additional resources /usr/share/doc/rhel-system-roles/sshd/README.md file. ansible-playbook(1) man page. | [
"sshd_ListenAddress: - 0.0.0.0 - '::'",
"ListenAddress 0.0.0.0 ListenAddress ::",
"sshd_service_template_service (default: templates/sshd.service.j2) sshd_service_template_at_service (default: templates/[email protected]) sshd_service_template_socket (default: templates/sshd.socket.j2)",
"sshd: Compression: yes ListenAddress: - 0.0.0.0",
"sshd_Compression: no",
"cp /usr/share/doc/rhel-system-roles/sshd/example-root-login-playbook.yml path/custom-playbook.yml",
"vim path/custom-playbook.yml --- - hosts: all tasks: - name: Configure sshd to prevent root and password login except from particular subnet include_role: name: rhel-system-roles.sshd vars: sshd: # root login and password login is enabled only from a particular subnet PermitRootLogin: no PasswordAuthentication: no Match: - Condition: \"Address 192.0.2.0/24\" PermitRootLogin: yes PasswordAuthentication: yes",
"ansible-playbook --syntax-check path/custom-playbook.yml",
"ansible-playbook -i inventory_file path/custom-playbook.yml PLAY RECAP ************************************************** localhost : ok=12 changed=2 unreachable=0 failed=0 skipped=10 rescued=0 ignored=0",
"ssh user1 @ 10.1.1.1",
"cat /etc/ssh/sshd_config ... PasswordAuthentication no PermitRootLogin no ... Match Address 192.0.2.0/24 PasswordAuthentication yes PermitRootLogin yes ...",
"hostname -I 192.0.2.1",
"ssh [email protected]",
"LocalForward: - 22 localhost:2222 - 403 localhost:4003",
"LocalForward 22 localhost:2222 LocalForward 403 localhost:4003",
"--- - hosts: all tasks: - name: \"Configure ssh clients\" include_role: name: rhel-system-roles.ssh vars: ssh_user: root ssh: Compression: true GSSAPIAuthentication: no ControlMaster: auto ControlPath: ~/.ssh/.cm%C Host: - Condition: example Hostname: example.com User: user1 ssh_ForwardX11: no",
"ansible-playbook --syntax-check path/custom-playbook.yml",
"ansible-playbook -i inventory_file path/custom-playbook.yml",
"vi ~root/.ssh/config",
"Ansible managed Compression yes ControlMaster auto ControlPath ~/.ssh/.cm%C ForwardX11 no GSSAPIAuthentication no Host example Hostname example.com User user1",
"--- - hosts: all tasks: - name: <Configure SSHD to accept some useful environment variables> include_role: name: rhel-system-roles.sshd vars: sshd_config_namespace: <my-application> sshd: # Environment variables to accept AcceptEnv: LANG LS_COLORS EDITOR",
"BEGIN sshd system role managed block: namespace <my-application> Match all AcceptEnv LANG LS_COLORS EDITOR END sshd system role managed block: namespace <my-application>",
"ansible-playbook --syntax-check playbook.yml -i inventory_file"
] | https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/7/html/automating_system_administration_by_using_rhel_system_roles_in_rhel_7.9/configuring-secure-communication-by-using-the-ssh-and-sshd-rhel-system-roles_automating-system-administration-by-using-rhel-system-roles |
Managing resources | Managing resources Red Hat OpenShift AI Cloud Service 1 Manage administration tasks from the OpenShift AI dashboard | null | https://docs.redhat.com/en/documentation/red_hat_openshift_ai_cloud_service/1/html/managing_resources/index |
19.6. Displaying Guest Details | 19.6. Displaying Guest Details You can use the Virtual Machine Monitor to view activity information for any virtual machines on your system. To view a virtual system's details: In the Virtual Machine Manager main window, highlight the virtual machine that you want to view. Figure 19.12. Selecting a virtual machine to display From the Virtual Machine Manager Edit menu, select Virtual Machine Details . When the Virtual Machine details window opens, there may be a console displayed. Should this happen, click View and then select Details . The Overview window opens first by default. To go back to this window, select Overview from the navigation pane on the left-hand side. The Overview view shows a summary of configuration details for the guest. Figure 19.13. Displaying guest details overview Select CPUs from the navigation pane on the left-hand side. The CPUs view allows you to view or change the current processor allocation. It is also possible to increase the number of virtual CPUs (vCPUs) while the virtual machine is running, which is referred to as hot plugging . Important Hot unplugging vCPUs is not supported in Red Hat Enterprise Linux 7. Figure 19.14. Processor allocation panel Select Memory from the navigation pane on the left-hand side. The Memory view allows you to view or change the current memory allocation. Figure 19.15. Displaying memory allocation Select Boot Options from the navigation pane on the left-hand side. The Boot Options view allows you to view or change the boot options including whether or not the virtual machine starts when the host boots and the boot device order for the virtual machine. Figure 19.16. Displaying boot options Each virtual disk attached to the virtual machine is displayed in the navigation pane. click a virtual disk to modify or remove it. Figure 19.17. Displaying disk configuration Each virtual network interface attached to the virtual machine is displayed in the navigation pane. click a virtual network interface to modify or remove it. Figure 19.18. Displaying network configuration | null | https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/7/html/virtualization_deployment_and_administration_guide/sect-managing_guests_with_the_virtual_machine_manager_virt_manager-displaying_guest_details |
Chapter 4. Preparing overcloud templates for DCN deployment | Chapter 4. Preparing overcloud templates for DCN deployment 4.1. Prerequisites for using separate heat stacks Your environment must meet the following prerequisites before you create a deployment using separate heat stacks: An installed instance of Red Hat OpenStack Platform director 17.1. For Ceph Storage users: access to Red Hat Ceph Storage 5. For the central location: three nodes that are capable of serving as central Controller nodes. All three Controller nodes must be in the same heat stack. You cannot split Controller nodes, or any of the control plane services, across separate heat stacks. Ceph storage is a requirement at the central location if you plan to deploy Ceph storage at the edge. For each additional DCN site: three HCI compute nodes. All nodes must be pre-provisioned or able to PXE boot from the central deployment network. You can use a DHCP relay to enable this connectivity for DCNs. All nodes have been introspected by ironic. Red Hat recommends leaving the <role>HostnameFormat parameter as the default value: %stackname%-<role>-%index%. If you do not include the %stackname% prefix, your overcloud uses the same hostnames for distributed compute nodes in different stacks. Ensure that your distributed compute nodes use the %stackname% prefix to distinguish nodes from different edge sites. For example, if you deploy two edge sites named dcn0 and dcn1 , the stack name prefix helps you to distinguish between dcn0-distributedcompute-0 and dcn1-distributedcompute-0 when you run the openstack server list command on the undercloud. Source the centralrc authentication file to schedule workloads at edge sites as well as at the central location. You do not require authentication files that are automatically generated for edge sites. 4.2. Limitations of the example separate heat stacks deployment This document provides an example deployment that uses separate heat stacks on Red Hat OpenStack Platform. This example environment has the following limitations: Spine/Leaf networking - The example in this guide does not demonstrate routing requirements, which are required in distributed compute node (DCN) deployments. Ironic DHCP Relay - This guide does not include how to configure Ironic with a DHCP relay. 4.3. Designing your separate heat stacks deployment To segment your deployment within separate heat stacks, you must first deploy a single overcloud with the control plane. You can then create separate stacks for the distributed compute node (DCN) sites. The following example shows separate stacks for different node types: Controller nodes: A separate heat stack named central , for example, deploys the controllers. When you create new heat stacks for the DCN sites, you must create them with data from the central stack. The Controller nodes must be available for any instance management tasks. DCN sites: You can have separate, uniquely named heat stacks, such as dcn0 , dcn1 , and so on. Use a DHCP relay to extend the provisioning network to the remote site. Note You must create a separate availability zone (AZ) for each stack. 4.4. Managing separate heat stacks The procedures in this guide show how to organize the environment files for three heat stacks: central , dcn0 , and dcn1 . Red Hat recommends that you store the templates for each heat stack in a separate directory to keep the information about each deployment isolated. Procedure Define the central heat stack: Extract data from the central heat stack into a common directory for all DCN sites: Define the dcn0 site. To deploy more DCN sites, create additional dcn directories by number. Note The touch is used to provide an example of file organization. Each file must contain the appropriate content for successful deployments. 4.5. Retrieving the container images Use the following procedure, and its example file contents, to retrieve the container images you need for deployments with separate heat stacks. You must ensure the container images for optional or edge-specific services are included by running the openstack container image prepare command with edge site's environment files. For more information, see Preparing container images in the Installing and managing Red Hat OpenStack Platform with director guide. Procedure Add your Registry Service Account credentials to containers.yaml . Generate the environment file as images-env.yaml : The resulting images-env.yaml file is included as part of the overcloud deployment procedure for the stack for which it is generated. 4.6. Creating fast datapath roles for the edge To use fast datapath services at the edge, you must create a custom role that defines both fast datapath and edge services. When you create the roles file for deployment, you can include the newly created role that defines services needed for both distributed compute node architecture and fast datapath services such as DPDK or SR-IOV. For example, create a custom role for distributedCompute with DPDK: Prerequisites A successful undercloud installation. For more information, see Installing the undercloud . Procedure Log in to the undercloud host as the stack user. Copy the default roles directory: Create a new file named DistributedComputeDpdk.yaml from the DistributedCompute.yaml file: Add DPDK services to the new DistributedComputeDpdk.yaml file. You can identify the parameters that you need to add by identifying the parameters in the ComputeOvsDpdk.yaml file that are not present in the DistributedComputeDpdk.yaml file. In the output, the parameters that are preceded by + are present in the ComputeOvsDpdk.yaml file but are not present in the DistributedComputeDpdk.yaml file. Include these parameters in the new DistributedComputeDpdk.yaml file. Use the DistributedComputeDpdk.yaml to create a DistributedComputeDpdk roles file : You can use this same method to create fast datapath roles for SR-IOV, or a combination of SR-IOV and DPDK for the edge to meet your requirements. 4.7. Configuring jumbo frames Jumbo frames are frames with an MTU of 9,000. Jumbo frames are not mandatory for the Storage and Storage Management networks but the increase in MTU size improves storage performance. If you want to use jumbo frames, you must configure all network switch ports in the data path to support jumbo frames. Important Network configuration changes such as MTU settings must be completed during the initial deployment. They cannot be applied to an existing deployment. Procedure Log in to the undercloud node as the stack user. Locate the network definition file. Modify the network definition file to extend the template to include the StorageMgmtIpSubnet and StorageMgmtNetworkVlanID attributes of the Storage Management network. Set the mtu attribute of the interfaces to 9000 . The following is an example of implementing these interface settings: Note For a complete example of a network definition file, see Example network definition file . Save the changes to the network definition file. Note All network switch ports between servers using the interface with the new MTU setting must be updated to support jumbo frames. If these switch changes are not made, problems will develop at the application layer that can cause the Red Hat Ceph Storage cluster to not reach quorum. If these settings are made and these problems are still observed, verify all hosts using the network configured for jumbo frames can communicate at the configured MTU setting. Use a command like the following example to perform this task: ping -M do -s 8972 172.16.1.11 If you are planning to deploy edge sites without block storage, see the following: Chapter 5, Installing the central location Section 6.2, "Deploying edge nodes without storage" If you plan to deploy edge sites with Red Hat Ceph Storage, see the following: Chapter 5, Installing the central location Section 7.4, "Deploying edge sites with hyperconverged storage" | [
"mkdir central touch central/overrides.yaml",
"mkdir dcn-common touch dcn-common/overrides.yaml",
"mkdir dcn0 touch dcn0/overrides.yaml",
"parameter_defaults: ContainerImagePrepare: - push_destination: true set: ceph_namespace: registry.redhat.io/rhceph ceph_image: rhceph-6-rhel9 ceph_tag: latest name_prefix: openstack- namespace: registry.redhat.io/rhosp17-rhel9 tag: latest ContainerImageRegistryCredentials: # https://access.redhat.com/RegistryAuthentication registry.redhat.io: registry-service-account-username: registry-service-account-password",
"sudo openstack tripleo container image prepare -e containers.yaml --output-env-file images-env.yaml",
"cp -r /usr/share/openstack-tripleo-heat-templates/roles ~/.",
"cp roles/DistributedCompute.yaml roles/DistributedComputeDpdk.yaml",
"diff -u roles/DistributedComputeDpdk.yaml roles/ComputeOvsDpdk.yaml",
"openstack overcloud roles generate --roles-path ~/roles/ -o ~/roles/roles-custom.yaml DistributedComputeDpdk",
"- type: interface name: em2 use_dhcp: false mtu: 9000 - type: vlan device: em2 mtu: 9000 use_dhcp: false vlan_id: {get_param: StorageMgmtNetworkVlanID} addresses: - ip_netmask: {get_param: StorageMgmtIpSubnet} - type: vlan device: em2 mtu: 9000 use_dhcp: false vlan_id: {get_param: StorageNetworkVlanID} addresses: - ip_netmask: {get_param: StorageIpSubnet}"
] | https://docs.redhat.com/en/documentation/red_hat_openstack_platform/17.1/html/deploying_a_distributed_compute_node_dcn_architecture/preparing_overcloud_templates_for_dcn_deployment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.