Dataset columns:
title: string (4 to 168 characters)
content: string (7 characters to 1.74M characters)
commands: sequence (1 to 5.62k items)
url: string (79 to 342 characters)
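The preview row below illustrates how these columns fit together: a page title, the flattened page content, the list of extracted commands and snippets, and the source URL. As a minimal sketch of how such a dataset could be inspected, the following uses the Hugging Face `datasets` library; the repository id and the `train` split name are placeholders, not the real identifiers for this dataset.

```python
# Minimal sketch: load one row and look at the columns described above.
# "your-namespace/openshift-docs-commands" is a placeholder repo id.
from datasets import load_dataset

ds = load_dataset("your-namespace/openshift-docs-commands", split="train")

row = ds[0]
print(row["title"])          # e.g. "Installing on Azure"
print(len(row["commands"]))  # number of extracted commands for this page
print(row["url"])            # source documentation URL

# Each entry in "commands" is a raw string: a shell command, an example
# output block, or a YAML/JSON snippet taken from the documentation page.
for cmd in row["commands"][:3]:
    print(cmd[:80])
```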
Installing on Azure
Installing on Azure | OpenShift Container Platform 4.17 | Installing OpenShift Container Platform on Azure | Red Hat OpenShift Documentation Team
[ "az login", "az account list --refresh", "[ { \"cloudName\": \"AzureCloud\", \"id\": \"8xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\", \"isDefault\": true, \"name\": \"Subscription Name 1\", \"state\": \"Enabled\", \"tenantId\": \"6xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\", \"user\": { \"name\": \"[email protected]\", \"type\": \"user\" } }, { \"cloudName\": \"AzureCloud\", \"id\": \"9xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\", \"isDefault\": false, \"name\": \"Subscription Name 2\", \"state\": \"Enabled\", \"tenantId\": \"7xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\", \"user\": { \"name\": \"[email protected]\", \"type\": \"user\" } } ]", "az account show", "{ \"environmentName\": \"AzureCloud\", \"id\": \"8xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\", \"isDefault\": true, \"name\": \"Subscription Name 1\", \"state\": \"Enabled\", \"tenantId\": \"6xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\", \"user\": { \"name\": \"[email protected]\", \"type\": \"user\" } }", "az account set -s <subscription_id>", "az account show", "{ \"environmentName\": \"AzureCloud\", \"id\": \"9xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\", \"isDefault\": true, \"name\": \"Subscription Name 2\", \"state\": \"Enabled\", \"tenantId\": \"7xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\", \"user\": { \"name\": \"[email protected]\", \"type\": \"user\" } }", "az ad sp create-for-rbac --role <role_name> \\ 1 --name <service_principal> \\ 2 --scopes /subscriptions/<subscription_id> 3", "Creating 'Contributor' role assignment under scope '/subscriptions/<subscription_id>' The output includes credentials that you must protect. Be sure that you do not include these credentials in your code or check the credentials into your source control. For more information, see https://aka.ms/azadsp-cli { \"appId\": \"axxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\", \"displayName\": <service_principal>\", \"password\": \"00000000-0000-0000-0000-000000000000\", \"tenantId\": \"8xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\" }", "az role assignment create --role \"User Access Administrator\" --assignee-object-id USD(az ad sp show --id <appId> --query id -o tsv) 1 --scope /subscriptions/<subscription_id> 2", "ssh-keygen -t ed25519 -N '' -f <path>/<file_name> 1", "cat <path>/<file_name>.pub", "cat ~/.ssh/id_ed25519.pub", "eval \"USD(ssh-agent -s)\"", "Agent pid 31874", "ssh-add <path>/<file_name> 1", "Identity added: /home/<you>/<path>/<file_name> (<computer_name>)", "tar -xvf openshift-install-linux.tar.gz", "tar xvf <file>", "echo USDPATH", "oc <command>", "C:\\> path", "C:\\> oc <command>", "echo USDPATH", "oc <command>", "export RESOURCEGROUP=\"<resource_group>\" \\ 1 LOCATION=\"<location>\" 2", "export KEYVAULT_NAME=\"<keyvault_name>\" \\ 1 KEYVAULT_KEY_NAME=\"<keyvault_key_name>\" \\ 2 DISK_ENCRYPTION_SET_NAME=\"<disk_encryption_set_name>\" 3", "export CLUSTER_SP_ID=\"<service_principal_id>\" 1", "az feature register --namespace \"Microsoft.Compute\" --name \"EncryptionAtHost\"", "az feature show --namespace Microsoft.Compute --name EncryptionAtHost", "az provider register -n Microsoft.Compute", "az group create --name USDRESOURCEGROUP --location USDLOCATION", "az keyvault create -n USDKEYVAULT_NAME -g USDRESOURCEGROUP -l USDLOCATION --enable-purge-protection true", "az keyvault key create --vault-name USDKEYVAULT_NAME -n USDKEYVAULT_KEY_NAME --protection software", "KEYVAULT_ID=USD(az keyvault show --name USDKEYVAULT_NAME --query \"[id]\" -o tsv)", "KEYVAULT_KEY_URL=USD(az keyvault key show --vault-name USDKEYVAULT_NAME --name USDKEYVAULT_KEY_NAME --query \"[key.kid]\" -o tsv)", "az disk-encryption-set 
create -n USDDISK_ENCRYPTION_SET_NAME -l USDLOCATION -g USDRESOURCEGROUP --source-vault USDKEYVAULT_ID --key-url USDKEYVAULT_KEY_URL", "DES_IDENTITY=USD(az disk-encryption-set show -n USDDISK_ENCRYPTION_SET_NAME -g USDRESOURCEGROUP --query \"[identity.principalId]\" -o tsv)", "az keyvault set-policy -n USDKEYVAULT_NAME -g USDRESOURCEGROUP --object-id USDDES_IDENTITY --key-permissions wrapkey unwrapkey get", "DES_RESOURCE_ID=USD(az disk-encryption-set show -n USDDISK_ENCRYPTION_SET_NAME -g USDRESOURCEGROUP --query \"[id]\" -o tsv)", "az role assignment create --assignee USDCLUSTER_SP_ID --role \"<reader_role>\" \\ 1 --scope USDDES_RESOURCE_ID -o jsonc", "./openshift-install create cluster --dir <installation_directory> \\ 1 --log-level=info 2", "INFO Install complete! INFO To access the cluster as the system:admin user when using 'oc', run 'export KUBECONFIG=/home/myuser/install_dir/auth/kubeconfig' INFO Access the OpenShift web-console here: https://console-openshift-console.apps.mycluster.example.com INFO Login to the console with user: \"kubeadmin\", and password: \"password\" INFO Time elapsed: 36m22s", "export KUBECONFIG=<installation_directory>/auth/kubeconfig 1", "oc whoami", "system:admin", "az vm image list --all --offer rh-ocp-worker --publisher redhat -o table", "Offer Publisher Sku Urn Version ------------- -------------- ------------------ -------------------------------------------------------------- ----------------- rh-ocp-worker RedHat rh-ocp-worker RedHat:rh-ocp-worker:rh-ocp-worker:4.15.2024072409 4.15.2024072409 rh-ocp-worker RedHat rh-ocp-worker-gen1 RedHat:rh-ocp-worker:rh-ocp-worker-gen1:4.15.2024072409 4.15.2024072409", "az vm image list --all --offer rh-ocp-worker --publisher redhat-limited -o table", "Offer Publisher Sku Urn Version ------------- -------------- ------------------ -------------------------------------------------------------- ----------------- rh-ocp-worker redhat-limited rh-ocp-worker redhat-limited:rh-ocp-worker:rh-ocp-worker:4.15.2024072409 4.15.2024072409 rh-ocp-worker redhat-limited rh-ocp-worker-gen1 redhat-limited:rh-ocp-worker:rh-ocp-worker-gen1:4.15.2024072409 4.15.2024072409", "az vm image show --urn redhat:rh-ocp-worker:rh-ocp-worker:<version>", "az vm image show --urn redhat-limited:rh-ocp-worker:rh-ocp-worker:<version>", "az vm image terms show --urn redhat:rh-ocp-worker:rh-ocp-worker:<version>", "az vm image terms show --urn redhat-limited:rh-ocp-worker:rh-ocp-worker:<version>", "az vm image terms accept --urn redhat:rh-ocp-worker:rh-ocp-worker:<version>", "az vm image terms accept --urn redhat-limited:rh-ocp-worker:rh-ocp-worker:<version>", "apiVersion: v1 baseDomain: example.com compute: - hyperthreading: Enabled name: worker platform: azure: type: Standard_D4s_v5 osImage: publisher: redhat offer: rh-ocp-worker sku: rh-ocp-worker version: 413.92.2023101700 replicas: 3", "./openshift-install create install-config --dir <installation_directory> 1", "controlPlane: platform: azure: settings: securityType: TrustedLaunch trustedLaunch: uefiSettings: secureBoot: Enabled virtualizedTrustedPlatformModule: Enabled", "compute: platform: azure: settings: securityType: TrustedLaunch trustedLaunch: uefiSettings: secureBoot: Enabled virtualizedTrustedPlatformModule: Enabled", "platform: azure: settings: securityType: TrustedLaunch trustedLaunch: uefiSettings: secureBoot: Enabled virtualizedTrustedPlatformModule: Enabled", "controlPlane: platform: azure: settings: securityType: ConfidentialVM confidentialVM: uefiSettings: secureBoot: Enabled 
virtualizedTrustedPlatformModule: Enabled osDisk: securityProfile: securityEncryptionType: VMGuestStateOnly", "compute: platform: azure: settings: securityType: ConfidentialVM confidentialVM: uefiSettings: secureBoot: Enabled virtualizedTrustedPlatformModule: Enabled osDisk: securityProfile: securityEncryptionType: VMGuestStateOnly", "platform: azure: settings: securityType: ConfidentialVM confidentialVM: uefiSettings: secureBoot: Enabled virtualizedTrustedPlatformModule: Enabled osDisk: securityProfile: securityEncryptionType: VMGuestStateOnly", "apiVersion: v1 baseDomain: example.com 1 controlPlane: 2 hyperthreading: Enabled 3 4 name: master platform: azure: encryptionAtHost: true ultraSSDCapability: Enabled osDisk: diskSizeGB: 1024 5 diskType: Premium_LRS diskEncryptionSet: resourceGroup: disk_encryption_set_resource_group name: disk_encryption_set_name subscriptionId: secondary_subscription_id osImage: publisher: example_publisher_name offer: example_image_offer sku: example_offer_sku version: example_image_version type: Standard_D8s_v3 replicas: 3 compute: 6 - hyperthreading: Enabled 7 8 name: worker platform: azure: ultraSSDCapability: Enabled type: Standard_D2s_v3 encryptionAtHost: true osDisk: diskSizeGB: 512 9 diskType: Standard_LRS diskEncryptionSet: resourceGroup: disk_encryption_set_resource_group name: disk_encryption_set_name subscriptionId: secondary_subscription_id osImage: publisher: example_publisher_name offer: example_image_offer sku: example_offer_sku version: example_image_version zones: 10 - \"1\" - \"2\" - \"3\" replicas: 5 metadata: name: test-cluster 11 networking: clusterNetwork: - cidr: 10.128.0.0/14 hostPrefix: 23 machineNetwork: - cidr: 10.0.0.0/16 networkType: OVNKubernetes 12 serviceNetwork: - 172.30.0.0/16 platform: azure: defaultMachinePlatform: osImage: 13 publisher: example_publisher_name offer: example_image_offer sku: example_offer_sku version: example_image_version ultraSSDCapability: Enabled baseDomainResourceGroupName: resource_group 14 region: centralus 15 resourceGroupName: existing_resource_group 16 outboundType: Loadbalancer cloudName: AzurePublicCloud pullSecret: '{\"auths\": ...}' 17 fips: false 18 sshKey: ssh-ed25519 AAAA... 
19", "apiVersion: v1 baseDomain: my.domain.com proxy: httpProxy: http://<username>:<pswd>@<ip>:<port> 1 httpsProxy: https://<username>:<pswd>@<ip>:<port> 2 noProxy: example.com 3 additionalTrustBundle: | 4 -----BEGIN CERTIFICATE----- <MY_TRUSTED_CA_CERT> -----END CERTIFICATE----- additionalTrustBundlePolicy: <policy_to_add_additionalTrustBundle> 5", "./openshift-install wait-for install-complete --log-level debug", "apiVersion: v1 baseDomain: example.com # platform: azure: userTags: 1 <key>: <value> 2 #", "apiVersion: v1 baseDomain: example.com # platform: azure: userTags: createdBy: user environment: dev #", "oc get infrastructures.config.openshift.io cluster -o=jsonpath-as-json='{.status.platformStatus.azure.resourceTags}'", "[ [ { \"key\": \"createdBy\", \"value\": \"user\" }, { \"key\": \"environment\", \"value\": \"dev\" } ] ]", "apiVersion: v1 baseDomain: example.com credentialsMode: Manual", "openshift-install create manifests --dir <installation_directory>", "RELEASE_IMAGE=USD(./openshift-install version | awk '/release image/ {print USD3}')", "oc adm release extract --from=USDRELEASE_IMAGE --credentials-requests --included \\ 1 --install-config=<path_to_directory_with_installation_configuration>/install-config.yaml \\ 2 --to=<path_to_directory_for_credentials_requests> 3", "apiVersion: cloudcredential.openshift.io/v1 kind: CredentialsRequest metadata: name: <component_credentials_request> namespace: openshift-cloud-credential-operator spec: providerSpec: apiVersion: cloudcredential.openshift.io/v1 kind: AzureProviderSpec roleBindings: - role: Contributor", "apiVersion: cloudcredential.openshift.io/v1 kind: CredentialsRequest metadata: name: <component_credentials_request> namespace: openshift-cloud-credential-operator spec: providerSpec: apiVersion: cloudcredential.openshift.io/v1 kind: AzureProviderSpec roleBindings: - role: Contributor secretRef: name: <component_secret> namespace: <component_namespace>", "apiVersion: v1 kind: Secret metadata: name: <component_secret> namespace: <component_namespace> data: azure_subscription_id: <base64_encoded_azure_subscription_id> azure_client_id: <base64_encoded_azure_client_id> azure_client_secret: <base64_encoded_azure_client_secret> azure_tenant_id: <base64_encoded_azure_tenant_id> azure_resource_prefix: <base64_encoded_azure_resource_prefix> azure_resourcegroup: <base64_encoded_azure_resourcegroup> azure_region: <base64_encoded_azure_region>", "RELEASE_IMAGE=USD(./openshift-install version | awk '/release image/ {print USD3}')", "CCO_IMAGE=USD(oc adm release info --image-for='cloud-credential-operator' USDRELEASE_IMAGE -a ~/.pull-secret)", "oc image extract USDCCO_IMAGE --file=\"/usr/bin/ccoctl.<rhel_version>\" \\ 1 -a ~/.pull-secret", "chmod 775 ccoctl.<rhel_version>", "./ccoctl.rhel9", "OpenShift credentials provisioning tool Usage: ccoctl [command] Available Commands: aws Manage credentials objects for AWS cloud azure Manage credentials objects for Azure gcp Manage credentials objects for Google cloud help Help about any command ibmcloud Manage credentials objects for {ibm-cloud-title} nutanix Manage credentials objects for Nutanix Flags: -h, --help help for ccoctl Use \"ccoctl [command] --help\" for more information about a command.", "RELEASE_IMAGE=USD(./openshift-install version | awk '/release image/ {print USD3}')", "oc adm release extract --from=USDRELEASE_IMAGE --credentials-requests --included \\ 1 --install-config=<path_to_directory_with_installation_configuration>/install-config.yaml \\ 2 
--to=<path_to_directory_for_credentials_requests> 3", "az login", "ccoctl azure create-all --name=<azure_infra_name> \\ 1 --output-dir=<ccoctl_output_dir> \\ 2 --region=<azure_region> \\ 3 --subscription-id=<azure_subscription_id> \\ 4 --credentials-requests-dir=<path_to_credentials_requests_directory> \\ 5 --dnszone-resource-group-name=<azure_dns_zone_resource_group_name> \\ 6 --tenant-id=<azure_tenant_id> 7", "ls <path_to_ccoctl_output_dir>/manifests", "azure-ad-pod-identity-webhook-config.yaml cluster-authentication-02-config.yaml openshift-cloud-controller-manager-azure-cloud-credentials-credentials.yaml openshift-cloud-network-config-controller-cloud-credentials-credentials.yaml openshift-cluster-api-capz-manager-bootstrap-credentials-credentials.yaml openshift-cluster-csi-drivers-azure-disk-credentials-credentials.yaml openshift-cluster-csi-drivers-azure-file-credentials-credentials.yaml openshift-image-registry-installer-cloud-credentials-credentials.yaml openshift-ingress-operator-cloud-credentials-credentials.yaml openshift-machine-api-azure-cloud-credentials-credentials.yaml", "apiVersion: v1 baseDomain: example.com credentialsMode: Manual", "apiVersion: v1 baseDomain: example.com platform: azure: resourceGroupName: <azure_infra_name> 1", "openshift-install create manifests --dir <installation_directory>", "cp /<path_to_ccoctl_output_dir>/manifests/* ./manifests/", "cp -a /<path_to_ccoctl_output_dir>/tls .", "./openshift-install create cluster --dir <installation_directory> \\ 1 --log-level=info 2", "INFO Install complete! INFO To access the cluster as the system:admin user when using 'oc', run 'export KUBECONFIG=/home/myuser/install_dir/auth/kubeconfig' INFO Access the OpenShift web-console here: https://console-openshift-console.apps.mycluster.example.com INFO Login to the console with user: \"kubeadmin\", and password: \"password\" INFO Time elapsed: 36m22s", "export KUBECONFIG=<installation_directory>/auth/kubeconfig 1", "oc whoami", "system:admin", "./openshift-install create install-config --dir <installation_directory> 1", "controlPlane: platform: azure: settings: securityType: TrustedLaunch trustedLaunch: uefiSettings: secureBoot: Enabled virtualizedTrustedPlatformModule: Enabled", "compute: platform: azure: settings: securityType: TrustedLaunch trustedLaunch: uefiSettings: secureBoot: Enabled virtualizedTrustedPlatformModule: Enabled", "platform: azure: settings: securityType: TrustedLaunch trustedLaunch: uefiSettings: secureBoot: Enabled virtualizedTrustedPlatformModule: Enabled", "controlPlane: platform: azure: settings: securityType: ConfidentialVM confidentialVM: uefiSettings: secureBoot: Enabled virtualizedTrustedPlatformModule: Enabled osDisk: securityProfile: securityEncryptionType: VMGuestStateOnly", "compute: platform: azure: settings: securityType: ConfidentialVM confidentialVM: uefiSettings: secureBoot: Enabled virtualizedTrustedPlatformModule: Enabled osDisk: securityProfile: securityEncryptionType: VMGuestStateOnly", "platform: azure: settings: securityType: ConfidentialVM confidentialVM: uefiSettings: secureBoot: Enabled virtualizedTrustedPlatformModule: Enabled osDisk: securityProfile: securityEncryptionType: VMGuestStateOnly", "apiVersion: v1 baseDomain: example.com 1 controlPlane: 2 hyperthreading: Enabled 3 4 name: master platform: azure: encryptionAtHost: true ultraSSDCapability: Enabled osDisk: diskSizeGB: 1024 5 diskType: Premium_LRS diskEncryptionSet: resourceGroup: disk_encryption_set_resource_group name: disk_encryption_set_name subscriptionId: 
secondary_subscription_id osImage: publisher: example_publisher_name offer: example_image_offer sku: example_offer_sku version: example_image_version type: Standard_D8s_v3 replicas: 3 compute: 6 - hyperthreading: Enabled 7 8 name: worker platform: azure: ultraSSDCapability: Enabled type: Standard_D2s_v3 encryptionAtHost: true osDisk: diskSizeGB: 512 9 diskType: Standard_LRS diskEncryptionSet: resourceGroup: disk_encryption_set_resource_group name: disk_encryption_set_name subscriptionId: secondary_subscription_id osImage: publisher: example_publisher_name offer: example_image_offer sku: example_offer_sku version: example_image_version zones: 10 - \"1\" - \"2\" - \"3\" replicas: 5 metadata: name: test-cluster 11 networking: 12 clusterNetwork: - cidr: 10.128.0.0/14 hostPrefix: 23 machineNetwork: - cidr: 10.0.0.0/16 networkType: OVNKubernetes 13 serviceNetwork: - 172.30.0.0/16 platform: azure: defaultMachinePlatform: osImage: 14 publisher: example_publisher_name offer: example_image_offer sku: example_offer_sku version: example_image_version ultraSSDCapability: Enabled baseDomainResourceGroupName: resource_group 15 region: centralus 16 resourceGroupName: existing_resource_group 17 outboundType: Loadbalancer cloudName: AzurePublicCloud pullSecret: '{\"auths\": ...}' 18 fips: false 19 sshKey: ssh-ed25519 AAAA... 20", "apiVersion: v1 baseDomain: my.domain.com proxy: httpProxy: http://<username>:<pswd>@<ip>:<port> 1 httpsProxy: https://<username>:<pswd>@<ip>:<port> 2 noProxy: example.com 3 additionalTrustBundle: | 4 -----BEGIN CERTIFICATE----- <MY_TRUSTED_CA_CERT> -----END CERTIFICATE----- additionalTrustBundlePolicy: <policy_to_add_additionalTrustBundle> 5", "./openshift-install wait-for install-complete --log-level debug", "./openshift-install create manifests --dir <installation_directory> 1", "apiVersion: operator.openshift.io/v1 kind: Network metadata: name: cluster spec:", "apiVersion: operator.openshift.io/v1 kind: Network metadata: name: cluster spec: defaultNetwork: ovnKubernetesConfig: ipsecConfig: mode: Full", "rm -f openshift/99_openshift-cluster-api_master-machines-*.yaml openshift/99_openshift-cluster-api_worker-machineset-*.yaml", "spec: clusterNetwork: - cidr: 10.128.0.0/19 hostPrefix: 23 - cidr: 10.128.32.0/19 hostPrefix: 23", "spec: serviceNetwork: - 172.30.0.0/14", "defaultNetwork: type: OVNKubernetes ovnKubernetesConfig: mtu: 1400 genevePort: 6081 ipsecConfig: mode: Full", "./openshift-install create manifests --dir <installation_directory>", "cat <<EOF > <installation_directory>/manifests/cluster-network-03-config.yml apiVersion: operator.openshift.io/v1 kind: Network metadata: name: cluster spec: EOF", "apiVersion: operator.openshift.io/v1 kind: Network metadata: name: cluster spec: defaultNetwork: ovnKubernetesConfig: hybridOverlayConfig: hybridClusterNetwork: 1 - cidr: 10.132.0.0/14 hostPrefix: 23 hybridOverlayVXLANPort: 9898 2", "apiVersion: v1 baseDomain: example.com credentialsMode: Manual", "openshift-install create manifests --dir <installation_directory>", "RELEASE_IMAGE=USD(./openshift-install version | awk '/release image/ {print USD3}')", "oc adm release extract --from=USDRELEASE_IMAGE --credentials-requests --included \\ 1 --install-config=<path_to_directory_with_installation_configuration>/install-config.yaml \\ 2 --to=<path_to_directory_for_credentials_requests> 3", "apiVersion: cloudcredential.openshift.io/v1 kind: CredentialsRequest metadata: name: <component_credentials_request> namespace: openshift-cloud-credential-operator spec: providerSpec: apiVersion: 
cloudcredential.openshift.io/v1 kind: AzureProviderSpec roleBindings: - role: Contributor", "apiVersion: cloudcredential.openshift.io/v1 kind: CredentialsRequest metadata: name: <component_credentials_request> namespace: openshift-cloud-credential-operator spec: providerSpec: apiVersion: cloudcredential.openshift.io/v1 kind: AzureProviderSpec roleBindings: - role: Contributor secretRef: name: <component_secret> namespace: <component_namespace>", "apiVersion: v1 kind: Secret metadata: name: <component_secret> namespace: <component_namespace> data: azure_subscription_id: <base64_encoded_azure_subscription_id> azure_client_id: <base64_encoded_azure_client_id> azure_client_secret: <base64_encoded_azure_client_secret> azure_tenant_id: <base64_encoded_azure_tenant_id> azure_resource_prefix: <base64_encoded_azure_resource_prefix> azure_resourcegroup: <base64_encoded_azure_resourcegroup> azure_region: <base64_encoded_azure_region>", "RELEASE_IMAGE=USD(./openshift-install version | awk '/release image/ {print USD3}')", "CCO_IMAGE=USD(oc adm release info --image-for='cloud-credential-operator' USDRELEASE_IMAGE -a ~/.pull-secret)", "oc image extract USDCCO_IMAGE --file=\"/usr/bin/ccoctl.<rhel_version>\" \\ 1 -a ~/.pull-secret", "chmod 775 ccoctl.<rhel_version>", "./ccoctl.rhel9", "OpenShift credentials provisioning tool Usage: ccoctl [command] Available Commands: aws Manage credentials objects for AWS cloud azure Manage credentials objects for Azure gcp Manage credentials objects for Google cloud help Help about any command ibmcloud Manage credentials objects for {ibm-cloud-title} nutanix Manage credentials objects for Nutanix Flags: -h, --help help for ccoctl Use \"ccoctl [command] --help\" for more information about a command.", "RELEASE_IMAGE=USD(./openshift-install version | awk '/release image/ {print USD3}')", "oc adm release extract --from=USDRELEASE_IMAGE --credentials-requests --included \\ 1 --install-config=<path_to_directory_with_installation_configuration>/install-config.yaml \\ 2 --to=<path_to_directory_for_credentials_requests> 3", "az login", "ccoctl azure create-all --name=<azure_infra_name> \\ 1 --output-dir=<ccoctl_output_dir> \\ 2 --region=<azure_region> \\ 3 --subscription-id=<azure_subscription_id> \\ 4 --credentials-requests-dir=<path_to_credentials_requests_directory> \\ 5 --dnszone-resource-group-name=<azure_dns_zone_resource_group_name> \\ 6 --tenant-id=<azure_tenant_id> 7", "ls <path_to_ccoctl_output_dir>/manifests", "azure-ad-pod-identity-webhook-config.yaml cluster-authentication-02-config.yaml openshift-cloud-controller-manager-azure-cloud-credentials-credentials.yaml openshift-cloud-network-config-controller-cloud-credentials-credentials.yaml openshift-cluster-api-capz-manager-bootstrap-credentials-credentials.yaml openshift-cluster-csi-drivers-azure-disk-credentials-credentials.yaml openshift-cluster-csi-drivers-azure-file-credentials-credentials.yaml openshift-image-registry-installer-cloud-credentials-credentials.yaml openshift-ingress-operator-cloud-credentials-credentials.yaml openshift-machine-api-azure-cloud-credentials-credentials.yaml", "apiVersion: v1 baseDomain: example.com credentialsMode: Manual", "apiVersion: v1 baseDomain: example.com platform: azure: resourceGroupName: <azure_infra_name> 1", "openshift-install create manifests --dir <installation_directory>", "cp /<path_to_ccoctl_output_dir>/manifests/* ./manifests/", "cp -a /<path_to_ccoctl_output_dir>/tls .", "./openshift-install create cluster --dir <installation_directory> \\ 1 --log-level=info 2", 
"INFO Install complete! INFO To access the cluster as the system:admin user when using 'oc', run 'export KUBECONFIG=/home/myuser/install_dir/auth/kubeconfig' INFO Access the OpenShift web-console here: https://console-openshift-console.apps.mycluster.example.com INFO Login to the console with user: \"kubeadmin\", and password: \"password\" INFO Time elapsed: 36m22s", "export KUBECONFIG=<installation_directory>/auth/kubeconfig 1", "oc whoami", "system:admin", "./openshift-install create install-config --dir <installation_directory> 1", "pullSecret: '{\"auths\":{\"<mirror_host_name>:5000\": {\"auth\": \"<credentials>\",\"email\": \"[email protected]\"}}}'", "additionalTrustBundle: | -----BEGIN CERTIFICATE----- ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ -----END CERTIFICATE-----", "networkResourceGroupName: <vnet_resource_group> 1 virtualNetwork: <vnet> 2 controlPlaneSubnet: <control_plane_subnet> 3 computeSubnet: <compute_subnet> 4", "imageContentSources: - mirrors: - <mirror_host_name>:5000/<repo_name>/release source: quay.io/openshift-release-dev/ocp-release - mirrors: - <mirror_host_name>:5000/<repo_name>/release source: registry.redhat.io/ocp/release", "publish: Internal", "controlPlane: platform: azure: settings: securityType: TrustedLaunch trustedLaunch: uefiSettings: secureBoot: Enabled virtualizedTrustedPlatformModule: Enabled", "compute: platform: azure: settings: securityType: TrustedLaunch trustedLaunch: uefiSettings: secureBoot: Enabled virtualizedTrustedPlatformModule: Enabled", "platform: azure: settings: securityType: TrustedLaunch trustedLaunch: uefiSettings: secureBoot: Enabled virtualizedTrustedPlatformModule: Enabled", "controlPlane: platform: azure: settings: securityType: ConfidentialVM confidentialVM: uefiSettings: secureBoot: Enabled virtualizedTrustedPlatformModule: Enabled osDisk: securityProfile: securityEncryptionType: VMGuestStateOnly", "compute: platform: azure: settings: securityType: ConfidentialVM confidentialVM: uefiSettings: secureBoot: Enabled virtualizedTrustedPlatformModule: Enabled osDisk: securityProfile: securityEncryptionType: VMGuestStateOnly", "platform: azure: settings: securityType: ConfidentialVM confidentialVM: uefiSettings: secureBoot: Enabled virtualizedTrustedPlatformModule: Enabled osDisk: securityProfile: securityEncryptionType: VMGuestStateOnly", "apiVersion: v1 baseDomain: example.com 1 controlPlane: 2 hyperthreading: Enabled 3 4 name: master platform: azure: encryptionAtHost: true ultraSSDCapability: Enabled osDisk: diskSizeGB: 1024 5 diskType: Premium_LRS diskEncryptionSet: resourceGroup: disk_encryption_set_resource_group name: disk_encryption_set_name subscriptionId: secondary_subscription_id osImage: publisher: example_publisher_name offer: example_image_offer sku: example_offer_sku version: example_image_version type: Standard_D8s_v3 replicas: 3 compute: 6 - hyperthreading: Enabled 7 8 name: worker platform: azure: ultraSSDCapability: Enabled type: Standard_D2s_v3 encryptionAtHost: true osDisk: diskSizeGB: 512 9 diskType: Standard_LRS diskEncryptionSet: resourceGroup: disk_encryption_set_resource_group name: disk_encryption_set_name subscriptionId: secondary_subscription_id osImage: publisher: example_publisher_name offer: example_image_offer sku: example_offer_sku version: example_image_version zones: 10 - \"1\" - \"2\" - \"3\" replicas: 5 metadata: name: test-cluster 11 networking: clusterNetwork: - cidr: 10.128.0.0/14 hostPrefix: 23 machineNetwork: - cidr: 10.0.0.0/16 networkType: OVNKubernetes 12 
serviceNetwork: - 172.30.0.0/16 platform: azure: defaultMachinePlatform: osImage: 13 publisher: example_publisher_name offer: example_image_offer sku: example_offer_sku version: example_image_version ultraSSDCapability: Enabled baseDomainResourceGroupName: resource_group 14 region: centralus 15 resourceGroupName: existing_resource_group 16 networkResourceGroupName: vnet_resource_group 17 virtualNetwork: vnet 18 controlPlaneSubnet: control_plane_subnet 19 computeSubnet: compute_subnet 20 outboundType: UserDefinedRouting 21 cloudName: AzurePublicCloud pullSecret: '{\"auths\": ...}' 22 fips: false 23 sshKey: ssh-ed25519 AAAA... 24 additionalTrustBundle: | 25 -----BEGIN CERTIFICATE----- <MY_TRUSTED_CA_CERT> -----END CERTIFICATE----- imageContentSources: 26 - mirrors: - <local_registry>/<local_repository_name>/release source: quay.io/openshift-release-dev/ocp-release - mirrors: - <local_registry>/<local_repository_name>/release source: quay.io/openshift-release-dev/ocp-v4.0-art-dev publish: Internal 27", "apiVersion: v1 baseDomain: my.domain.com proxy: httpProxy: http://<username>:<pswd>@<ip>:<port> 1 httpsProxy: https://<username>:<pswd>@<ip>:<port> 2 noProxy: example.com 3 additionalTrustBundle: | 4 -----BEGIN CERTIFICATE----- <MY_TRUSTED_CA_CERT> -----END CERTIFICATE----- additionalTrustBundlePolicy: <policy_to_add_additionalTrustBundle> 5", "./openshift-install wait-for install-complete --log-level debug", "apiVersion: v1 baseDomain: example.com credentialsMode: Manual", "openshift-install create manifests --dir <installation_directory>", "RELEASE_IMAGE=USD(./openshift-install version | awk '/release image/ {print USD3}')", "oc adm release extract --from=USDRELEASE_IMAGE --credentials-requests --included \\ 1 --install-config=<path_to_directory_with_installation_configuration>/install-config.yaml \\ 2 --to=<path_to_directory_for_credentials_requests> 3", "apiVersion: cloudcredential.openshift.io/v1 kind: CredentialsRequest metadata: name: <component_credentials_request> namespace: openshift-cloud-credential-operator spec: providerSpec: apiVersion: cloudcredential.openshift.io/v1 kind: AzureProviderSpec roleBindings: - role: Contributor", "apiVersion: cloudcredential.openshift.io/v1 kind: CredentialsRequest metadata: name: <component_credentials_request> namespace: openshift-cloud-credential-operator spec: providerSpec: apiVersion: cloudcredential.openshift.io/v1 kind: AzureProviderSpec roleBindings: - role: Contributor secretRef: name: <component_secret> namespace: <component_namespace>", "apiVersion: v1 kind: Secret metadata: name: <component_secret> namespace: <component_namespace> data: azure_subscription_id: <base64_encoded_azure_subscription_id> azure_client_id: <base64_encoded_azure_client_id> azure_client_secret: <base64_encoded_azure_client_secret> azure_tenant_id: <base64_encoded_azure_tenant_id> azure_resource_prefix: <base64_encoded_azure_resource_prefix> azure_resourcegroup: <base64_encoded_azure_resourcegroup> azure_region: <base64_encoded_azure_region>", "RELEASE_IMAGE=USD(./openshift-install version | awk '/release image/ {print USD3}')", "CCO_IMAGE=USD(oc adm release info --image-for='cloud-credential-operator' USDRELEASE_IMAGE -a ~/.pull-secret)", "oc image extract USDCCO_IMAGE --file=\"/usr/bin/ccoctl.<rhel_version>\" \\ 1 -a ~/.pull-secret", "chmod 775 ccoctl.<rhel_version>", "./ccoctl.rhel9", "OpenShift credentials provisioning tool Usage: ccoctl [command] Available Commands: aws Manage credentials objects for AWS cloud azure Manage credentials objects for Azure gcp 
Manage credentials objects for Google cloud help Help about any command ibmcloud Manage credentials objects for {ibm-cloud-title} nutanix Manage credentials objects for Nutanix Flags: -h, --help help for ccoctl Use \"ccoctl [command] --help\" for more information about a command.", "RELEASE_IMAGE=USD(./openshift-install version | awk '/release image/ {print USD3}')", "oc adm release extract --from=USDRELEASE_IMAGE --credentials-requests --included \\ 1 --install-config=<path_to_directory_with_installation_configuration>/install-config.yaml \\ 2 --to=<path_to_directory_for_credentials_requests> 3", "az login", "ccoctl azure create-all --name=<azure_infra_name> \\ 1 --output-dir=<ccoctl_output_dir> \\ 2 --region=<azure_region> \\ 3 --subscription-id=<azure_subscription_id> \\ 4 --credentials-requests-dir=<path_to_credentials_requests_directory> \\ 5 --dnszone-resource-group-name=<azure_dns_zone_resource_group_name> \\ 6 --tenant-id=<azure_tenant_id> 7", "ls <path_to_ccoctl_output_dir>/manifests", "azure-ad-pod-identity-webhook-config.yaml cluster-authentication-02-config.yaml openshift-cloud-controller-manager-azure-cloud-credentials-credentials.yaml openshift-cloud-network-config-controller-cloud-credentials-credentials.yaml openshift-cluster-api-capz-manager-bootstrap-credentials-credentials.yaml openshift-cluster-csi-drivers-azure-disk-credentials-credentials.yaml openshift-cluster-csi-drivers-azure-file-credentials-credentials.yaml openshift-image-registry-installer-cloud-credentials-credentials.yaml openshift-ingress-operator-cloud-credentials-credentials.yaml openshift-machine-api-azure-cloud-credentials-credentials.yaml", "apiVersion: v1 baseDomain: example.com credentialsMode: Manual", "apiVersion: v1 baseDomain: example.com platform: azure: resourceGroupName: <azure_infra_name> 1", "openshift-install create manifests --dir <installation_directory>", "cp /<path_to_ccoctl_output_dir>/manifests/* ./manifests/", "cp -a /<path_to_ccoctl_output_dir>/tls .", "./openshift-install create cluster --dir <installation_directory> \\ 1 --log-level=info 2", "INFO Install complete! 
INFO To access the cluster as the system:admin user when using 'oc', run 'export KUBECONFIG=/home/myuser/install_dir/auth/kubeconfig' INFO Access the OpenShift web-console here: https://console-openshift-console.apps.mycluster.example.com INFO Login to the console with user: \"kubeadmin\", and password: \"password\" INFO Time elapsed: 36m22s", "export KUBECONFIG=<installation_directory>/auth/kubeconfig 1", "oc whoami", "system:admin", "./openshift-install create install-config --dir <installation_directory> 1", "controlPlane: platform: azure: settings: securityType: TrustedLaunch trustedLaunch: uefiSettings: secureBoot: Enabled virtualizedTrustedPlatformModule: Enabled", "compute: platform: azure: settings: securityType: TrustedLaunch trustedLaunch: uefiSettings: secureBoot: Enabled virtualizedTrustedPlatformModule: Enabled", "platform: azure: settings: securityType: TrustedLaunch trustedLaunch: uefiSettings: secureBoot: Enabled virtualizedTrustedPlatformModule: Enabled", "controlPlane: platform: azure: settings: securityType: ConfidentialVM confidentialVM: uefiSettings: secureBoot: Enabled virtualizedTrustedPlatformModule: Enabled osDisk: securityProfile: securityEncryptionType: VMGuestStateOnly", "compute: platform: azure: settings: securityType: ConfidentialVM confidentialVM: uefiSettings: secureBoot: Enabled virtualizedTrustedPlatformModule: Enabled osDisk: securityProfile: securityEncryptionType: VMGuestStateOnly", "platform: azure: settings: securityType: ConfidentialVM confidentialVM: uefiSettings: secureBoot: Enabled virtualizedTrustedPlatformModule: Enabled osDisk: securityProfile: securityEncryptionType: VMGuestStateOnly", "apiVersion: v1 baseDomain: example.com 1 controlPlane: 2 hyperthreading: Enabled 3 4 name: master platform: azure: encryptionAtHost: true ultraSSDCapability: Enabled osDisk: diskSizeGB: 1024 5 diskType: Premium_LRS diskEncryptionSet: resourceGroup: disk_encryption_set_resource_group name: disk_encryption_set_name subscriptionId: secondary_subscription_id osImage: publisher: example_publisher_name offer: example_image_offer sku: example_offer_sku version: example_image_version type: Standard_D8s_v3 replicas: 3 compute: 6 - hyperthreading: Enabled 7 8 name: worker platform: azure: ultraSSDCapability: Enabled type: Standard_D2s_v3 encryptionAtHost: true osDisk: diskSizeGB: 512 9 diskType: Standard_LRS diskEncryptionSet: resourceGroup: disk_encryption_set_resource_group name: disk_encryption_set_name subscriptionId: secondary_subscription_id osImage: publisher: example_publisher_name offer: example_image_offer sku: example_offer_sku version: example_image_version zones: 10 - \"1\" - \"2\" - \"3\" replicas: 5 metadata: name: test-cluster 11 networking: clusterNetwork: - cidr: 10.128.0.0/14 hostPrefix: 23 machineNetwork: - cidr: 10.0.0.0/16 networkType: OVNKubernetes 12 serviceNetwork: - 172.30.0.0/16 platform: azure: defaultMachinePlatform: osImage: 13 publisher: example_publisher_name offer: example_image_offer sku: example_offer_sku version: example_image_version ultraSSDCapability: Enabled baseDomainResourceGroupName: resource_group 14 region: centralus 15 resourceGroupName: existing_resource_group 16 networkResourceGroupName: vnet_resource_group 17 virtualNetwork: vnet 18 controlPlaneSubnet: control_plane_subnet 19 computeSubnet: compute_subnet 20 outboundType: Loadbalancer cloudName: AzurePublicCloud pullSecret: '{\"auths\": ...}' 21 fips: false 22 sshKey: ssh-ed25519 AAAA... 
23", "apiVersion: v1 baseDomain: my.domain.com proxy: httpProxy: http://<username>:<pswd>@<ip>:<port> 1 httpsProxy: https://<username>:<pswd>@<ip>:<port> 2 noProxy: example.com 3 additionalTrustBundle: | 4 -----BEGIN CERTIFICATE----- <MY_TRUSTED_CA_CERT> -----END CERTIFICATE----- additionalTrustBundlePolicy: <policy_to_add_additionalTrustBundle> 5", "./openshift-install wait-for install-complete --log-level debug", "apiVersion: v1 baseDomain: example.com credentialsMode: Manual", "openshift-install create manifests --dir <installation_directory>", "RELEASE_IMAGE=USD(./openshift-install version | awk '/release image/ {print USD3}')", "oc adm release extract --from=USDRELEASE_IMAGE --credentials-requests --included \\ 1 --install-config=<path_to_directory_with_installation_configuration>/install-config.yaml \\ 2 --to=<path_to_directory_for_credentials_requests> 3", "apiVersion: cloudcredential.openshift.io/v1 kind: CredentialsRequest metadata: name: <component_credentials_request> namespace: openshift-cloud-credential-operator spec: providerSpec: apiVersion: cloudcredential.openshift.io/v1 kind: AzureProviderSpec roleBindings: - role: Contributor", "apiVersion: cloudcredential.openshift.io/v1 kind: CredentialsRequest metadata: name: <component_credentials_request> namespace: openshift-cloud-credential-operator spec: providerSpec: apiVersion: cloudcredential.openshift.io/v1 kind: AzureProviderSpec roleBindings: - role: Contributor secretRef: name: <component_secret> namespace: <component_namespace>", "apiVersion: v1 kind: Secret metadata: name: <component_secret> namespace: <component_namespace> data: azure_subscription_id: <base64_encoded_azure_subscription_id> azure_client_id: <base64_encoded_azure_client_id> azure_client_secret: <base64_encoded_azure_client_secret> azure_tenant_id: <base64_encoded_azure_tenant_id> azure_resource_prefix: <base64_encoded_azure_resource_prefix> azure_resourcegroup: <base64_encoded_azure_resourcegroup> azure_region: <base64_encoded_azure_region>", "RELEASE_IMAGE=USD(./openshift-install version | awk '/release image/ {print USD3}')", "CCO_IMAGE=USD(oc adm release info --image-for='cloud-credential-operator' USDRELEASE_IMAGE -a ~/.pull-secret)", "oc image extract USDCCO_IMAGE --file=\"/usr/bin/ccoctl.<rhel_version>\" \\ 1 -a ~/.pull-secret", "chmod 775 ccoctl.<rhel_version>", "./ccoctl.rhel9", "OpenShift credentials provisioning tool Usage: ccoctl [command] Available Commands: aws Manage credentials objects for AWS cloud azure Manage credentials objects for Azure gcp Manage credentials objects for Google cloud help Help about any command ibmcloud Manage credentials objects for {ibm-cloud-title} nutanix Manage credentials objects for Nutanix Flags: -h, --help help for ccoctl Use \"ccoctl [command] --help\" for more information about a command.", "RELEASE_IMAGE=USD(./openshift-install version | awk '/release image/ {print USD3}')", "oc adm release extract --from=USDRELEASE_IMAGE --credentials-requests --included \\ 1 --install-config=<path_to_directory_with_installation_configuration>/install-config.yaml \\ 2 --to=<path_to_directory_for_credentials_requests> 3", "az login", "ccoctl azure create-all --name=<azure_infra_name> \\ 1 --output-dir=<ccoctl_output_dir> \\ 2 --region=<azure_region> \\ 3 --subscription-id=<azure_subscription_id> \\ 4 --credentials-requests-dir=<path_to_credentials_requests_directory> \\ 5 --dnszone-resource-group-name=<azure_dns_zone_resource_group_name> \\ 6 --tenant-id=<azure_tenant_id> 7", "ls <path_to_ccoctl_output_dir>/manifests", 
"azure-ad-pod-identity-webhook-config.yaml cluster-authentication-02-config.yaml openshift-cloud-controller-manager-azure-cloud-credentials-credentials.yaml openshift-cloud-network-config-controller-cloud-credentials-credentials.yaml openshift-cluster-api-capz-manager-bootstrap-credentials-credentials.yaml openshift-cluster-csi-drivers-azure-disk-credentials-credentials.yaml openshift-cluster-csi-drivers-azure-file-credentials-credentials.yaml openshift-image-registry-installer-cloud-credentials-credentials.yaml openshift-ingress-operator-cloud-credentials-credentials.yaml openshift-machine-api-azure-cloud-credentials-credentials.yaml", "apiVersion: v1 baseDomain: example.com credentialsMode: Manual", "apiVersion: v1 baseDomain: example.com platform: azure: resourceGroupName: <azure_infra_name> 1", "openshift-install create manifests --dir <installation_directory>", "cp /<path_to_ccoctl_output_dir>/manifests/* ./manifests/", "cp -a /<path_to_ccoctl_output_dir>/tls .", "./openshift-install create cluster --dir <installation_directory> \\ 1 --log-level=info 2", "INFO Install complete! INFO To access the cluster as the system:admin user when using 'oc', run 'export KUBECONFIG=/home/myuser/install_dir/auth/kubeconfig' INFO Access the OpenShift web-console here: https://console-openshift-console.apps.mycluster.example.com INFO Login to the console with user: \"kubeadmin\", and password: \"password\" INFO Time elapsed: 36m22s", "The cluster is configured so that the Operators do not create public records for the cluster and all cluster machines are placed in the private subnets that you specify.", "mkdir <installation_directory>", "controlPlane: platform: azure: settings: securityType: TrustedLaunch trustedLaunch: uefiSettings: secureBoot: Enabled virtualizedTrustedPlatformModule: Enabled", "compute: platform: azure: settings: securityType: TrustedLaunch trustedLaunch: uefiSettings: secureBoot: Enabled virtualizedTrustedPlatformModule: Enabled", "platform: azure: settings: securityType: TrustedLaunch trustedLaunch: uefiSettings: secureBoot: Enabled virtualizedTrustedPlatformModule: Enabled", "controlPlane: platform: azure: settings: securityType: ConfidentialVM confidentialVM: uefiSettings: secureBoot: Enabled virtualizedTrustedPlatformModule: Enabled osDisk: securityProfile: securityEncryptionType: VMGuestStateOnly", "compute: platform: azure: settings: securityType: ConfidentialVM confidentialVM: uefiSettings: secureBoot: Enabled virtualizedTrustedPlatformModule: Enabled osDisk: securityProfile: securityEncryptionType: VMGuestStateOnly", "platform: azure: settings: securityType: ConfidentialVM confidentialVM: uefiSettings: secureBoot: Enabled virtualizedTrustedPlatformModule: Enabled osDisk: securityProfile: securityEncryptionType: VMGuestStateOnly", "apiVersion: v1 baseDomain: example.com 1 controlPlane: 2 hyperthreading: Enabled 3 4 name: master platform: azure: encryptionAtHost: true ultraSSDCapability: Enabled osDisk: diskSizeGB: 1024 5 diskType: Premium_LRS diskEncryptionSet: resourceGroup: disk_encryption_set_resource_group name: disk_encryption_set_name subscriptionId: secondary_subscription_id osImage: publisher: example_publisher_name offer: example_image_offer sku: example_offer_sku version: example_image_version type: Standard_D8s_v3 replicas: 3 compute: 6 - hyperthreading: Enabled 7 8 name: worker platform: azure: ultraSSDCapability: Enabled type: Standard_D2s_v3 encryptionAtHost: true osDisk: diskSizeGB: 512 9 diskType: Standard_LRS diskEncryptionSet: resourceGroup: 
disk_encryption_set_resource_group name: disk_encryption_set_name subscriptionId: secondary_subscription_id osImage: publisher: example_publisher_name offer: example_image_offer sku: example_offer_sku version: example_image_version zones: 10 - \"1\" - \"2\" - \"3\" replicas: 5 metadata: name: test-cluster 11 networking: clusterNetwork: - cidr: 10.128.0.0/14 hostPrefix: 23 machineNetwork: - cidr: 10.0.0.0/16 networkType: OVNKubernetes 12 serviceNetwork: - 172.30.0.0/16 platform: azure: defaultMachinePlatform: osImage: 13 publisher: example_publisher_name offer: example_image_offer sku: example_offer_sku version: example_image_version ultraSSDCapability: Enabled baseDomainResourceGroupName: resource_group 14 region: centralus 15 resourceGroupName: existing_resource_group 16 networkResourceGroupName: vnet_resource_group 17 virtualNetwork: vnet 18 controlPlaneSubnet: control_plane_subnet 19 computeSubnet: compute_subnet 20 outboundType: UserDefinedRouting 21 cloudName: AzurePublicCloud pullSecret: '{\"auths\": ...}' 22 fips: false 23 sshKey: ssh-ed25519 AAAA... 24 publish: Internal 25", "apiVersion: v1 baseDomain: my.domain.com proxy: httpProxy: http://<username>:<pswd>@<ip>:<port> 1 httpsProxy: https://<username>:<pswd>@<ip>:<port> 2 noProxy: example.com 3 additionalTrustBundle: | 4 -----BEGIN CERTIFICATE----- <MY_TRUSTED_CA_CERT> -----END CERTIFICATE----- additionalTrustBundlePolicy: <policy_to_add_additionalTrustBundle> 5", "./openshift-install wait-for install-complete --log-level debug", "apiVersion: v1 baseDomain: example.com credentialsMode: Manual", "openshift-install create manifests --dir <installation_directory>", "RELEASE_IMAGE=USD(./openshift-install version | awk '/release image/ {print USD3}')", "oc adm release extract --from=USDRELEASE_IMAGE --credentials-requests --included \\ 1 --install-config=<path_to_directory_with_installation_configuration>/install-config.yaml \\ 2 --to=<path_to_directory_for_credentials_requests> 3", "apiVersion: cloudcredential.openshift.io/v1 kind: CredentialsRequest metadata: name: <component_credentials_request> namespace: openshift-cloud-credential-operator spec: providerSpec: apiVersion: cloudcredential.openshift.io/v1 kind: AzureProviderSpec roleBindings: - role: Contributor", "apiVersion: cloudcredential.openshift.io/v1 kind: CredentialsRequest metadata: name: <component_credentials_request> namespace: openshift-cloud-credential-operator spec: providerSpec: apiVersion: cloudcredential.openshift.io/v1 kind: AzureProviderSpec roleBindings: - role: Contributor secretRef: name: <component_secret> namespace: <component_namespace>", "apiVersion: v1 kind: Secret metadata: name: <component_secret> namespace: <component_namespace> data: azure_subscription_id: <base64_encoded_azure_subscription_id> azure_client_id: <base64_encoded_azure_client_id> azure_client_secret: <base64_encoded_azure_client_secret> azure_tenant_id: <base64_encoded_azure_tenant_id> azure_resource_prefix: <base64_encoded_azure_resource_prefix> azure_resourcegroup: <base64_encoded_azure_resourcegroup> azure_region: <base64_encoded_azure_region>", "RELEASE_IMAGE=USD(./openshift-install version | awk '/release image/ {print USD3}')", "CCO_IMAGE=USD(oc adm release info --image-for='cloud-credential-operator' USDRELEASE_IMAGE -a ~/.pull-secret)", "oc image extract USDCCO_IMAGE --file=\"/usr/bin/ccoctl.<rhel_version>\" \\ 1 -a ~/.pull-secret", "chmod 775 ccoctl.<rhel_version>", "./ccoctl.rhel9", "OpenShift credentials provisioning tool Usage: ccoctl [command] Available Commands: aws Manage 
credentials objects for AWS cloud azure Manage credentials objects for Azure gcp Manage credentials objects for Google cloud help Help about any command ibmcloud Manage credentials objects for {ibm-cloud-title} nutanix Manage credentials objects for Nutanix Flags: -h, --help help for ccoctl Use \"ccoctl [command] --help\" for more information about a command.", "RELEASE_IMAGE=USD(./openshift-install version | awk '/release image/ {print USD3}')", "oc adm release extract --from=USDRELEASE_IMAGE --credentials-requests --included \\ 1 --install-config=<path_to_directory_with_installation_configuration>/install-config.yaml \\ 2 --to=<path_to_directory_for_credentials_requests> 3", "az login", "ccoctl azure create-all --name=<azure_infra_name> \\ 1 --output-dir=<ccoctl_output_dir> \\ 2 --region=<azure_region> \\ 3 --subscription-id=<azure_subscription_id> \\ 4 --credentials-requests-dir=<path_to_credentials_requests_directory> \\ 5 --dnszone-resource-group-name=<azure_dns_zone_resource_group_name> \\ 6 --tenant-id=<azure_tenant_id> 7", "ls <path_to_ccoctl_output_dir>/manifests", "azure-ad-pod-identity-webhook-config.yaml cluster-authentication-02-config.yaml openshift-cloud-controller-manager-azure-cloud-credentials-credentials.yaml openshift-cloud-network-config-controller-cloud-credentials-credentials.yaml openshift-cluster-api-capz-manager-bootstrap-credentials-credentials.yaml openshift-cluster-csi-drivers-azure-disk-credentials-credentials.yaml openshift-cluster-csi-drivers-azure-file-credentials-credentials.yaml openshift-image-registry-installer-cloud-credentials-credentials.yaml openshift-ingress-operator-cloud-credentials-credentials.yaml openshift-machine-api-azure-cloud-credentials-credentials.yaml", "apiVersion: v1 baseDomain: example.com credentialsMode: Manual", "apiVersion: v1 baseDomain: example.com platform: azure: resourceGroupName: <azure_infra_name> 1", "openshift-install create manifests --dir <installation_directory>", "cp /<path_to_ccoctl_output_dir>/manifests/* ./manifests/", "cp -a /<path_to_ccoctl_output_dir>/tls .", "./openshift-install create manifests --dir <installation_directory>", "INFO Consuming Install Config from target directory INFO Manifests created in: <installation_directory>/manifests and <installation_directory>/openshift", "touch imageregistry-config.yaml", "apiVersion: imageregistry.operator.openshift.io/v1 kind: Config metadata: name: cluster spec: managementState: \"Managed\" replicas: 2 rolloutStrategy: RollingUpdate storage: azure: networkAccess: internal: networkResourceGroupName: <vnet_resource_group> 1 subnetName: <subnet_name> 2 vnetName: <vnet_name> 3 type: Internal", "mv imageregistry-config.yaml <installation_directory/manifests/>", "./openshift-install create cluster --dir <installation_directory> \\ 1 --log-level=info 2", "INFO Install complete! 
INFO To access the cluster as the system:admin user when using 'oc', run 'export KUBECONFIG=/home/myuser/install_dir/auth/kubeconfig' INFO Access the OpenShift web-console here: https://console-openshift-console.apps.mycluster.example.com INFO Login to the console with user: \"kubeadmin\", and password: \"password\" INFO Time elapsed: 36m22s", "export KUBECONFIG=<installation_directory>/auth/kubeconfig 1", "oc whoami", "system:admin", "The cluster is configured so that the Operators do not create public records for the cluster and all cluster machines are placed in the private subnets that you specify.", "mkdir <installation_directory>", "controlPlane: platform: azure: settings: securityType: TrustedLaunch trustedLaunch: uefiSettings: secureBoot: Enabled virtualizedTrustedPlatformModule: Enabled", "compute: platform: azure: settings: securityType: TrustedLaunch trustedLaunch: uefiSettings: secureBoot: Enabled virtualizedTrustedPlatformModule: Enabled", "platform: azure: settings: securityType: TrustedLaunch trustedLaunch: uefiSettings: secureBoot: Enabled virtualizedTrustedPlatformModule: Enabled", "controlPlane: platform: azure: settings: securityType: ConfidentialVM confidentialVM: uefiSettings: secureBoot: Enabled virtualizedTrustedPlatformModule: Enabled osDisk: securityProfile: securityEncryptionType: VMGuestStateOnly", "compute: platform: azure: settings: securityType: ConfidentialVM confidentialVM: uefiSettings: secureBoot: Enabled virtualizedTrustedPlatformModule: Enabled osDisk: securityProfile: securityEncryptionType: VMGuestStateOnly", "platform: azure: settings: securityType: ConfidentialVM confidentialVM: uefiSettings: secureBoot: Enabled virtualizedTrustedPlatformModule: Enabled osDisk: securityProfile: securityEncryptionType: VMGuestStateOnly", "apiVersion: v1 baseDomain: example.com 1 controlPlane: 2 hyperthreading: Enabled 3 4 name: master platform: azure: encryptionAtHost: true ultraSSDCapability: Enabled osDisk: diskSizeGB: 1024 5 diskType: Premium_LRS diskEncryptionSet: resourceGroup: disk_encryption_set_resource_group name: disk_encryption_set_name subscriptionId: secondary_subscription_id osImage: publisher: example_publisher_name offer: example_image_offer sku: example_offer_sku version: example_image_version type: Standard_D8s_v3 replicas: 3 compute: 6 - hyperthreading: Enabled 7 8 name: worker platform: azure: ultraSSDCapability: Enabled type: Standard_D2s_v3 encryptionAtHost: true osDisk: diskSizeGB: 512 9 diskType: Standard_LRS diskEncryptionSet: resourceGroup: disk_encryption_set_resource_group name: disk_encryption_set_name subscriptionId: secondary_subscription_id osImage: publisher: example_publisher_name offer: example_image_offer sku: example_offer_sku version: example_image_version zones: 10 - \"1\" - \"2\" - \"3\" replicas: 5 metadata: name: test-cluster 11 networking: clusterNetwork: - cidr: 10.128.0.0/14 hostPrefix: 23 machineNetwork: - cidr: 10.0.0.0/16 networkType: OVNKubernetes 12 serviceNetwork: - 172.30.0.0/16 platform: azure: defaultMachinePlatform: osImage: 13 publisher: example_publisher_name offer: example_image_offer sku: example_offer_sku version: example_image_version ultraSSDCapability: Enabled baseDomainResourceGroupName: resource_group 14 region: usgovvirginia resourceGroupName: existing_resource_group 15 networkResourceGroupName: vnet_resource_group 16 virtualNetwork: vnet 17 controlPlaneSubnet: control_plane_subnet 18 computeSubnet: compute_subnet 19 outboundType: UserDefinedRouting 20 cloudName: AzureUSGovernmentCloud 21 pullSecret: 
'{\"auths\": ...}' 22 fips: false 23 sshKey: ssh-ed25519 AAAA... 24 publish: Internal 25", "apiVersion: v1 baseDomain: my.domain.com proxy: httpProxy: http://<username>:<pswd>@<ip>:<port> 1 httpsProxy: https://<username>:<pswd>@<ip>:<port> 2 noProxy: example.com 3 additionalTrustBundle: | 4 -----BEGIN CERTIFICATE----- <MY_TRUSTED_CA_CERT> -----END CERTIFICATE----- additionalTrustBundlePolicy: <policy_to_add_additionalTrustBundle> 5", "./openshift-install wait-for install-complete --log-level debug", "./openshift-install create cluster --dir <installation_directory> \\ 1 --log-level=info 2", "INFO Install complete! INFO To access the cluster as the system:admin user when using 'oc', run 'export KUBECONFIG=/home/myuser/install_dir/auth/kubeconfig' INFO Access the OpenShift web-console here: https://console-openshift-console.apps.mycluster.example.com INFO Login to the console with user: \"kubeadmin\", and password: \"password\" INFO Time elapsed: 36m22s", "export KUBECONFIG=<installation_directory>/auth/kubeconfig 1", "oc whoami", "system:admin", "ssh-keygen -t ed25519 -N '' -f <path>/<file_name> 1", "cat <path>/<file_name>.pub", "cat ~/.ssh/id_ed25519.pub", "eval \"USD(ssh-agent -s)\"", "Agent pid 31874", "ssh-add <path>/<file_name> 1", "Identity added: /home/<you>/<path>/<file_name> (<computer_name>)", "tar -xvf openshift-install-linux.tar.gz", "tar xvf <file>", "echo USDPATH", "oc <command>", "C:\\> path", "C:\\> oc <command>", "echo USDPATH", "oc <command>", "az login", "az account list --refresh", "[ { \"cloudName\": \"AzureCloud\", \"id\": \"9bab1460-96d5-40b3-a78e-17b15e978a80\", \"isDefault\": true, \"name\": \"Subscription Name\", \"state\": \"Enabled\", \"tenantId\": \"6057c7e9-b3ae-489d-a54e-de3f6bf6a8ee\", \"user\": { \"name\": \"[email protected]\", \"type\": \"user\" } } ]", "az account show", "{ \"environmentName\": \"AzureCloud\", \"id\": \"9bab1460-96d5-40b3-a78e-17b15e978a80\", \"isDefault\": true, \"name\": \"Subscription Name\", \"state\": \"Enabled\", \"tenantId\": \"6057c7e9-b3ae-489d-a54e-de3f6bf6a8ee\", 1 \"user\": { \"name\": \"[email protected]\", \"type\": \"user\" } }", "az account set -s <subscription_id> 1", "az account show", "{ \"environmentName\": \"AzureCloud\", \"id\": \"33212d16-bdf6-45cb-b038-f6565b61edda\", \"isDefault\": true, \"name\": \"Subscription Name\", \"state\": \"Enabled\", \"tenantId\": \"8049c7e9-c3de-762d-a54e-dc3f6be6a7ee\", \"user\": { \"name\": \"[email protected]\", \"type\": \"user\" } }", "az ad sp create-for-rbac --role <role_name> \\ 1 --name <service_principal> \\ 2 --scopes /subscriptions/<subscription_id> 3", "Creating 'Contributor' role assignment under scope '/subscriptions/<subscription_id>' The output includes credentials that you must protect. Be sure that you do not include these credentials in your code or check the credentials into your source control. 
For more information, see https://aka.ms/azadsp-cli { \"appId\": \"ac461d78-bf4b-4387-ad16-7e32e328aec6\", \"displayName\": <service_principal>\", \"password\": \"00000000-0000-0000-0000-000000000000\", \"tenantId\": \"8049c7e9-c3de-762d-a54e-dc3f6be6a7ee\" }", "az role assignment create --role \"User Access Administrator\" --assignee-object-id USD(az ad sp show --id <appId> --query id -o tsv) 1", "az vm image list --all --offer rh-ocp-worker --publisher redhat -o table", "Offer Publisher Sku Urn Version ------------- -------------- ------------------ -------------------------------------------------------------- ----------------- rh-ocp-worker RedHat rh-ocp-worker RedHat:rh-ocp-worker:rh-ocp-worker:4.15.2024072409 4.15.2024072409 rh-ocp-worker RedHat rh-ocp-worker-gen1 RedHat:rh-ocp-worker:rh-ocp-worker-gen1:4.15.2024072409 4.15.2024072409", "az vm image list --all --offer rh-ocp-worker --publisher redhat-limited -o table", "Offer Publisher Sku Urn Version ------------- -------------- ------------------ -------------------------------------------------------------- ----------------- rh-ocp-worker redhat-limited rh-ocp-worker redhat-limited:rh-ocp-worker:rh-ocp-worker:4.15.2024072409 4.15.2024072409 rh-ocp-worker redhat-limited rh-ocp-worker-gen1 redhat-limited:rh-ocp-worker:rh-ocp-worker-gen1:4.15.2024072409 4.15.2024072409", "az vm image show --urn redhat:rh-ocp-worker:rh-ocp-worker:<version>", "az vm image show --urn redhat-limited:rh-ocp-worker:rh-ocp-worker:<version>", "az vm image terms show --urn redhat:rh-ocp-worker:rh-ocp-worker:<version>", "az vm image terms show --urn redhat-limited:rh-ocp-worker:rh-ocp-worker:<version>", "az vm image terms accept --urn redhat:rh-ocp-worker:rh-ocp-worker:<version>", "az vm image terms accept --urn redhat-limited:rh-ocp-worker:rh-ocp-worker:<version>", "\"plan\" : { \"name\": \"rh-ocp-worker\", \"product\": \"rh-ocp-worker\", \"publisher\": \"redhat\" }, \"dependsOn\" : [ \"[concat('Microsoft.Network/networkInterfaces/', concat(variables('vmNames')[copyIndex()], '-nic'))]\" ], \"properties\" : { \"storageProfile\": { \"imageReference\": { \"offer\": \"rh-ocp-worker\", \"publisher\": \"redhat\", \"sku\": \"rh-ocp-worker\", \"version\": \"413.92.2023101700\" } } }", "tar -xvf openshift-install-linux.tar.gz", "ssh-keygen -t ed25519 -N '' -f <path>/<file_name> 1", "cat <path>/<file_name>.pub", "cat ~/.ssh/id_ed25519.pub", "eval \"USD(ssh-agent -s)\"", "Agent pid 31874", "ssh-add <path>/<file_name> 1", "Identity added: /home/<you>/<path>/<file_name> (<computer_name>)", "mkdir USDHOME/clusterconfig", "openshift-install create manifests --dir USDHOME/clusterconfig", "? 
SSH Public Key INFO Credentials loaded from the \"myprofile\" profile in file \"/home/myuser/.aws/credentials\" INFO Consuming Install Config from target directory INFO Manifests created in: USDHOME/clusterconfig/manifests and USDHOME/clusterconfig/openshift", "ls USDHOME/clusterconfig/openshift/", "99_kubeadmin-password-secret.yaml 99_openshift-cluster-api_master-machines-0.yaml 99_openshift-cluster-api_master-machines-1.yaml 99_openshift-cluster-api_master-machines-2.yaml", "variant: openshift version: 4.17.0 metadata: labels: machineconfiguration.openshift.io/role: worker name: 98-var-partition storage: disks: - device: /dev/disk/by-id/<device_name> 1 partitions: - label: var start_mib: <partition_start_offset> 2 size_mib: <partition_size> 3 number: 5 filesystems: - device: /dev/disk/by-partlabel/var path: /var format: xfs mount_options: [defaults, prjquota] 4 with_mount_unit: true", "butane USDHOME/clusterconfig/98-var-partition.bu -o USDHOME/clusterconfig/openshift/98-var-partition.yaml", "openshift-install create ignition-configs --dir USDHOME/clusterconfig ls USDHOME/clusterconfig/ auth bootstrap.ign master.ign metadata.json worker.ign", "./openshift-install create install-config --dir <installation_directory> 1", "pullSecret: '{\"auths\":{\"<mirror_host_name>:5000\": {\"auth\": \"<credentials>\",\"email\": \"[email protected]\"}}}'", "additionalTrustBundle: | -----BEGIN CERTIFICATE----- ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ -----END CERTIFICATE-----", "networkResourceGroupName: <vnet_resource_group> 1 virtualNetwork: <vnet> 2 controlPlaneSubnet: <control_plane_subnet> 3 computeSubnet: <compute_subnet> 4", "imageContentSources: - mirrors: - <mirror_host_name>:5000/<repo_name>/release source: quay.io/openshift-release-dev/ocp-release - mirrors: - <mirror_host_name>:5000/<repo_name>/release source: registry.redhat.io/ocp/release", "publish: Internal", "apiVersion: v1 baseDomain: my.domain.com proxy: httpProxy: http://<username>:<pswd>@<ip>:<port> 1 httpsProxy: https://<username>:<pswd>@<ip>:<port> 2 noProxy: example.com 3 additionalTrustBundle: | 4 -----BEGIN CERTIFICATE----- <MY_TRUSTED_CA_CERT> -----END CERTIFICATE----- additionalTrustBundlePolicy: <policy_to_add_additionalTrustBundle> 5", "./openshift-install wait-for install-complete --log-level debug", "export CLUSTER_NAME=<cluster_name> 1 export AZURE_REGION=<azure_region> 2 export SSH_KEY=<ssh_key> 3 export BASE_DOMAIN=<base_domain> 4 export BASE_DOMAIN_RESOURCE_GROUP=<base_domain_resource_group> 5", "export CLUSTER_NAME=test-cluster export AZURE_REGION=centralus export SSH_KEY=\"ssh-rsa xxx/xxx/xxx= [email protected]\" export BASE_DOMAIN=example.com export BASE_DOMAIN_RESOURCE_GROUP=ocp-cluster", "export KUBECONFIG=<installation_directory>/auth/kubeconfig 1", "./openshift-install create manifests --dir <installation_directory> 1", "rm -f <installation_directory>/openshift/99_openshift-cluster-api_master-machines-*.yaml", "rm -f <installation_directory>/openshift/99_openshift-machine-api_master-control-plane-machine-set.yaml", "rm -f <installation_directory>/openshift/99_openshift-cluster-api_worker-machineset-*.yaml", "apiVersion: config.openshift.io/v1 kind: DNS metadata: creationTimestamp: null name: cluster spec: baseDomain: example.openshift.com privateZone: 1 id: mycluster-100419-private-zone publicZone: 2 id: example.openshift.com status: {}", "export INFRA_ID=<infra_id> 1", "export RESOURCE_GROUP=<resource_group> 1", "./openshift-install create ignition-configs --dir <installation_directory> 
1", ". ├── auth │ ├── kubeadmin-password │ └── kubeconfig ├── bootstrap.ign ├── master.ign ├── metadata.json └── worker.ign", "az group create --name USD{RESOURCE_GROUP} --location USD{AZURE_REGION}", "az identity create -g USD{RESOURCE_GROUP} -n USD{INFRA_ID}-identity", "export PRINCIPAL_ID=`az identity show -g USD{RESOURCE_GROUP} -n USD{INFRA_ID}-identity --query principalId --out tsv`", "export RESOURCE_GROUP_ID=`az group show -g USD{RESOURCE_GROUP} --query id --out tsv`", "az role assignment create --assignee \"USD{PRINCIPAL_ID}\" --role 'Contributor' --scope \"USD{RESOURCE_GROUP_ID}\"", "az role assignment create --assignee \"USD{PRINCIPAL_ID}\" --role <custom_role> \\ 1 --scope \"USD{RESOURCE_GROUP_ID}\"", "az storage account create -g USD{RESOURCE_GROUP} --location USD{AZURE_REGION} --name USD{CLUSTER_NAME}sa --kind Storage --sku Standard_LRS", "export ACCOUNT_KEY=`az storage account keys list -g USD{RESOURCE_GROUP} --account-name USD{CLUSTER_NAME}sa --query \"[0].value\" -o tsv`", "export VHD_URL=`openshift-install coreos print-stream-json | jq -r '.architectures.<architecture>.\"rhel-coreos-extensions\".\"azure-disk\".url'`", "az storage container create --name vhd --account-name USD{CLUSTER_NAME}sa --account-key USD{ACCOUNT_KEY}", "az storage blob copy start --account-name USD{CLUSTER_NAME}sa --account-key USD{ACCOUNT_KEY} --destination-blob \"rhcos.vhd\" --destination-container vhd --source-uri \"USD{VHD_URL}\"", "az storage container create --name files --account-name USD{CLUSTER_NAME}sa --account-key USD{ACCOUNT_KEY}", "az storage blob upload --account-name USD{CLUSTER_NAME}sa --account-key USD{ACCOUNT_KEY} -c \"files\" -f \"<installation_directory>/bootstrap.ign\" -n \"bootstrap.ign\"", "az network dns zone create -g USD{BASE_DOMAIN_RESOURCE_GROUP} -n USD{CLUSTER_NAME}.USD{BASE_DOMAIN}", "az network private-dns zone create -g USD{RESOURCE_GROUP} -n USD{CLUSTER_NAME}.USD{BASE_DOMAIN}", "az deployment group create -g USD{RESOURCE_GROUP} --template-file \"<installation_directory>/01_vnet.json\" --parameters baseName=\"USD{INFRA_ID}\" 1", "az network private-dns link vnet create -g USD{RESOURCE_GROUP} -z USD{CLUSTER_NAME}.USD{BASE_DOMAIN} -n USD{INFRA_ID}-network-link -v \"USD{INFRA_ID}-vnet\" -e false", "{ \"USDschema\" : \"https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#\", \"contentVersion\" : \"1.0.0.0\", \"parameters\" : { \"baseName\" : { \"type\" : \"string\", \"minLength\" : 1, \"metadata\" : { \"description\" : \"Base name to be used in resource names (usually the cluster's Infra ID)\" } } }, \"variables\" : { \"location\" : \"[resourceGroup().location]\", \"virtualNetworkName\" : \"[concat(parameters('baseName'), '-vnet')]\", \"addressPrefix\" : \"10.0.0.0/16\", \"masterSubnetName\" : \"[concat(parameters('baseName'), '-master-subnet')]\", \"masterSubnetPrefix\" : \"10.0.0.0/24\", \"nodeSubnetName\" : \"[concat(parameters('baseName'), '-worker-subnet')]\", \"nodeSubnetPrefix\" : \"10.0.1.0/24\", \"clusterNsgName\" : \"[concat(parameters('baseName'), '-nsg')]\" }, \"resources\" : [ { \"apiVersion\" : \"2018-12-01\", \"type\" : \"Microsoft.Network/virtualNetworks\", \"name\" : \"[variables('virtualNetworkName')]\", \"location\" : \"[variables('location')]\", \"dependsOn\" : [ \"[concat('Microsoft.Network/networkSecurityGroups/', variables('clusterNsgName'))]\" ], \"properties\" : { \"addressSpace\" : { \"addressPrefixes\" : [ \"[variables('addressPrefix')]\" ] }, \"subnets\" : [ { \"name\" : \"[variables('masterSubnetName')]\", 
\"properties\" : { \"addressPrefix\" : \"[variables('masterSubnetPrefix')]\", \"serviceEndpoints\": [], \"networkSecurityGroup\" : { \"id\" : \"[resourceId('Microsoft.Network/networkSecurityGroups', variables('clusterNsgName'))]\" } } }, { \"name\" : \"[variables('nodeSubnetName')]\", \"properties\" : { \"addressPrefix\" : \"[variables('nodeSubnetPrefix')]\", \"serviceEndpoints\": [], \"networkSecurityGroup\" : { \"id\" : \"[resourceId('Microsoft.Network/networkSecurityGroups', variables('clusterNsgName'))]\" } } } ] } }, { \"type\" : \"Microsoft.Network/networkSecurityGroups\", \"name\" : \"[variables('clusterNsgName')]\", \"apiVersion\" : \"2018-10-01\", \"location\" : \"[variables('location')]\", \"properties\" : { \"securityRules\" : [ { \"name\" : \"apiserver_in\", \"properties\" : { \"protocol\" : \"Tcp\", \"sourcePortRange\" : \"*\", \"destinationPortRange\" : \"6443\", \"sourceAddressPrefix\" : \"*\", \"destinationAddressPrefix\" : \"*\", \"access\" : \"Allow\", \"priority\" : 101, \"direction\" : \"Inbound\" } } ] } } ] }", "export VHD_BLOB_URL=`az storage blob url --account-name USD{CLUSTER_NAME}sa --account-key USD{ACCOUNT_KEY} -c vhd -n \"rhcos.vhd\" -o tsv`", "az deployment group create -g USD{RESOURCE_GROUP} --template-file \"<installation_directory>/02_storage.json\" --parameters vhdBlobURL=\"USD{VHD_BLOB_URL}\" \\ 1 --parameters baseName=\"USD{INFRA_ID}\" \\ 2 --parameters storageAccount=\"USD{CLUSTER_NAME}sa\" \\ 3 --parameters architecture=\"<architecture>\" 4", "{ \"USDschema\": \"https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#\", \"contentVersion\": \"1.0.0.0\", \"parameters\": { \"architecture\": { \"type\": \"string\", \"metadata\": { \"description\": \"The architecture of the Virtual Machines\" }, \"defaultValue\": \"x64\", \"allowedValues\": [ \"Arm64\", \"x64\" ] }, \"baseName\": { \"type\": \"string\", \"minLength\": 1, \"metadata\": { \"description\": \"Base name to be used in resource names (usually the cluster's Infra ID)\" } }, \"storageAccount\": { \"type\": \"string\", \"metadata\": { \"description\": \"The Storage Account name\" } }, \"vhdBlobURL\": { \"type\": \"string\", \"metadata\": { \"description\": \"URL pointing to the blob where the VHD to be used to create master and worker machines is located\" } } }, \"variables\": { \"location\": \"[resourceGroup().location]\", \"galleryName\": \"[concat('gallery_', replace(parameters('baseName'), '-', '_'))]\", \"imageName\": \"[parameters('baseName')]\", \"imageNameGen2\": \"[concat(parameters('baseName'), '-gen2')]\", \"imageRelease\": \"1.0.0\" }, \"resources\": [ { \"apiVersion\": \"2021-10-01\", \"type\": \"Microsoft.Compute/galleries\", \"name\": \"[variables('galleryName')]\", \"location\": \"[variables('location')]\", \"resources\": [ { \"apiVersion\": \"2021-10-01\", \"type\": \"images\", \"name\": \"[variables('imageName')]\", \"location\": \"[variables('location')]\", \"dependsOn\": [ \"[variables('galleryName')]\" ], \"properties\": { \"architecture\": \"[parameters('architecture')]\", \"hyperVGeneration\": \"V1\", \"identifier\": { \"offer\": \"rhcos\", \"publisher\": \"RedHat\", \"sku\": \"basic\" }, \"osState\": \"Generalized\", \"osType\": \"Linux\" }, \"resources\": [ { \"apiVersion\": \"2021-10-01\", \"type\": \"versions\", \"name\": \"[variables('imageRelease')]\", \"location\": \"[variables('location')]\", \"dependsOn\": [ \"[variables('imageName')]\" ], \"properties\": { \"publishingProfile\": { \"storageAccountType\": \"Standard_LRS\", \"targetRegions\": [ 
{ \"name\": \"[variables('location')]\", \"regionalReplicaCount\": \"1\" } ] }, \"storageProfile\": { \"osDiskImage\": { \"source\": { \"id\": \"[resourceId('Microsoft.Storage/storageAccounts', parameters('storageAccount'))]\", \"uri\": \"[parameters('vhdBlobURL')]\" } } } } } ] }, { \"apiVersion\": \"2021-10-01\", \"type\": \"images\", \"name\": \"[variables('imageNameGen2')]\", \"location\": \"[variables('location')]\", \"dependsOn\": [ \"[variables('galleryName')]\" ], \"properties\": { \"architecture\": \"[parameters('architecture')]\", \"hyperVGeneration\": \"V2\", \"identifier\": { \"offer\": \"rhcos-gen2\", \"publisher\": \"RedHat-gen2\", \"sku\": \"gen2\" }, \"osState\": \"Generalized\", \"osType\": \"Linux\" }, \"resources\": [ { \"apiVersion\": \"2021-10-01\", \"type\": \"versions\", \"name\": \"[variables('imageRelease')]\", \"location\": \"[variables('location')]\", \"dependsOn\": [ \"[variables('imageNameGen2')]\" ], \"properties\": { \"publishingProfile\": { \"storageAccountType\": \"Standard_LRS\", \"targetRegions\": [ { \"name\": \"[variables('location')]\", \"regionalReplicaCount\": \"1\" } ] }, \"storageProfile\": { \"osDiskImage\": { \"source\": { \"id\": \"[resourceId('Microsoft.Storage/storageAccounts', parameters('storageAccount'))]\", \"uri\": \"[parameters('vhdBlobURL')]\" } } } } } ] } ] } ] }", "az deployment group create -g USD{RESOURCE_GROUP} --template-file \"<installation_directory>/03_infra.json\" --parameters privateDNSZoneName=\"USD{CLUSTER_NAME}.USD{BASE_DOMAIN}\" \\ 1 --parameters baseName=\"USD{INFRA_ID}\" 2", "export PUBLIC_IP=`az network public-ip list -g USD{RESOURCE_GROUP} --query \"[?name=='USD{INFRA_ID}-master-pip'] | [0].ipAddress\" -o tsv`", "az network dns record-set a add-record -g USD{BASE_DOMAIN_RESOURCE_GROUP} -z USD{CLUSTER_NAME}.USD{BASE_DOMAIN} -n api -a USD{PUBLIC_IP} --ttl 60", "az network dns record-set a add-record -g USD{BASE_DOMAIN_RESOURCE_GROUP} -z USD{BASE_DOMAIN} -n api.USD{CLUSTER_NAME} -a USD{PUBLIC_IP} --ttl 60", "{ \"USDschema\" : \"https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#\", \"contentVersion\" : \"1.0.0.0\", \"parameters\" : { \"baseName\" : { \"type\" : \"string\", \"minLength\" : 1, \"metadata\" : { \"description\" : \"Base name to be used in resource names (usually the cluster's Infra ID)\" } }, \"vnetBaseName\": { \"type\": \"string\", \"defaultValue\": \"\", \"metadata\" : { \"description\" : \"The specific customer vnet's base name (optional)\" } }, \"privateDNSZoneName\" : { \"type\" : \"string\", \"metadata\" : { \"description\" : \"Name of the private DNS zone\" } } }, \"variables\" : { \"location\" : \"[resourceGroup().location]\", \"virtualNetworkName\" : \"[concat(if(not(empty(parameters('vnetBaseName'))), parameters('vnetBaseName'), parameters('baseName')), '-vnet')]\", \"virtualNetworkID\" : \"[resourceId('Microsoft.Network/virtualNetworks', variables('virtualNetworkName'))]\", \"masterSubnetName\" : \"[concat(if(not(empty(parameters('vnetBaseName'))), parameters('vnetBaseName'), parameters('baseName')), '-master-subnet')]\", \"masterSubnetRef\" : \"[concat(variables('virtualNetworkID'), '/subnets/', variables('masterSubnetName'))]\", \"masterPublicIpAddressName\" : \"[concat(parameters('baseName'), '-master-pip')]\", \"masterPublicIpAddressID\" : \"[resourceId('Microsoft.Network/publicIPAddresses', variables('masterPublicIpAddressName'))]\", \"masterLoadBalancerName\" : \"[parameters('baseName')]\", \"masterLoadBalancerID\" : 
\"[resourceId('Microsoft.Network/loadBalancers', variables('masterLoadBalancerName'))]\", \"internalLoadBalancerName\" : \"[concat(parameters('baseName'), '-internal-lb')]\", \"internalLoadBalancerID\" : \"[resourceId('Microsoft.Network/loadBalancers', variables('internalLoadBalancerName'))]\", \"skuName\": \"Standard\" }, \"resources\" : [ { \"apiVersion\" : \"2018-12-01\", \"type\" : \"Microsoft.Network/publicIPAddresses\", \"name\" : \"[variables('masterPublicIpAddressName')]\", \"location\" : \"[variables('location')]\", \"sku\": { \"name\": \"[variables('skuName')]\" }, \"properties\" : { \"publicIPAllocationMethod\" : \"Static\", \"dnsSettings\" : { \"domainNameLabel\" : \"[variables('masterPublicIpAddressName')]\" } } }, { \"apiVersion\" : \"2018-12-01\", \"type\" : \"Microsoft.Network/loadBalancers\", \"name\" : \"[variables('masterLoadBalancerName')]\", \"location\" : \"[variables('location')]\", \"sku\": { \"name\": \"[variables('skuName')]\" }, \"dependsOn\" : [ \"[concat('Microsoft.Network/publicIPAddresses/', variables('masterPublicIpAddressName'))]\" ], \"properties\" : { \"frontendIPConfigurations\" : [ { \"name\" : \"public-lb-ip-v4\", \"properties\" : { \"publicIPAddress\" : { \"id\" : \"[variables('masterPublicIpAddressID')]\" } } } ], \"backendAddressPools\" : [ { \"name\" : \"[variables('masterLoadBalancerName')]\" } ], \"loadBalancingRules\" : [ { \"name\" : \"api-internal\", \"properties\" : { \"frontendIPConfiguration\" : { \"id\" :\"[concat(variables('masterLoadBalancerID'), '/frontendIPConfigurations/public-lb-ip-v4')]\" }, \"backendAddressPool\" : { \"id\" : \"[concat(variables('masterLoadBalancerID'), '/backendAddressPools/', variables('masterLoadBalancerName'))]\" }, \"protocol\" : \"Tcp\", \"loadDistribution\" : \"Default\", \"idleTimeoutInMinutes\" : 30, \"frontendPort\" : 6443, \"backendPort\" : 6443, \"probe\" : { \"id\" : \"[concat(variables('masterLoadBalancerID'), '/probes/api-internal-probe')]\" } } } ], \"probes\" : [ { \"name\" : \"api-internal-probe\", \"properties\" : { \"protocol\" : \"Https\", \"port\" : 6443, \"requestPath\": \"/readyz\", \"intervalInSeconds\" : 10, \"numberOfProbes\" : 3 } } ] } }, { \"apiVersion\" : \"2018-12-01\", \"type\" : \"Microsoft.Network/loadBalancers\", \"name\" : \"[variables('internalLoadBalancerName')]\", \"location\" : \"[variables('location')]\", \"sku\": { \"name\": \"[variables('skuName')]\" }, \"properties\" : { \"frontendIPConfigurations\" : [ { \"name\" : \"internal-lb-ip\", \"properties\" : { \"privateIPAllocationMethod\" : \"Dynamic\", \"subnet\" : { \"id\" : \"[variables('masterSubnetRef')]\" }, \"privateIPAddressVersion\" : \"IPv4\" } } ], \"backendAddressPools\" : [ { \"name\" : \"internal-lb-backend\" } ], \"loadBalancingRules\" : [ { \"name\" : \"api-internal\", \"properties\" : { \"frontendIPConfiguration\" : { \"id\" : \"[concat(variables('internalLoadBalancerID'), '/frontendIPConfigurations/internal-lb-ip')]\" }, \"frontendPort\" : 6443, \"backendPort\" : 6443, \"enableFloatingIP\" : false, \"idleTimeoutInMinutes\" : 30, \"protocol\" : \"Tcp\", \"enableTcpReset\" : false, \"loadDistribution\" : \"Default\", \"backendAddressPool\" : { \"id\" : \"[concat(variables('internalLoadBalancerID'), '/backendAddressPools/internal-lb-backend')]\" }, \"probe\" : { \"id\" : \"[concat(variables('internalLoadBalancerID'), '/probes/api-internal-probe')]\" } } }, { \"name\" : \"sint\", \"properties\" : { \"frontendIPConfiguration\" : { \"id\" : \"[concat(variables('internalLoadBalancerID'), 
'/frontendIPConfigurations/internal-lb-ip')]\" }, \"frontendPort\" : 22623, \"backendPort\" : 22623, \"enableFloatingIP\" : false, \"idleTimeoutInMinutes\" : 30, \"protocol\" : \"Tcp\", \"enableTcpReset\" : false, \"loadDistribution\" : \"Default\", \"backendAddressPool\" : { \"id\" : \"[concat(variables('internalLoadBalancerID'), '/backendAddressPools/internal-lb-backend')]\" }, \"probe\" : { \"id\" : \"[concat(variables('internalLoadBalancerID'), '/probes/sint-probe')]\" } } } ], \"probes\" : [ { \"name\" : \"api-internal-probe\", \"properties\" : { \"protocol\" : \"Https\", \"port\" : 6443, \"requestPath\": \"/readyz\", \"intervalInSeconds\" : 10, \"numberOfProbes\" : 3 } }, { \"name\" : \"sint-probe\", \"properties\" : { \"protocol\" : \"Https\", \"port\" : 22623, \"requestPath\": \"/healthz\", \"intervalInSeconds\" : 10, \"numberOfProbes\" : 3 } } ] } }, { \"apiVersion\": \"2018-09-01\", \"type\": \"Microsoft.Network/privateDnsZones/A\", \"name\": \"[concat(parameters('privateDNSZoneName'), '/api')]\", \"location\" : \"[variables('location')]\", \"dependsOn\" : [ \"[concat('Microsoft.Network/loadBalancers/', variables('internalLoadBalancerName'))]\" ], \"properties\": { \"ttl\": 60, \"aRecords\": [ { \"ipv4Address\": \"[reference(variables('internalLoadBalancerName')).frontendIPConfigurations[0].properties.privateIPAddress]\" } ] } }, { \"apiVersion\": \"2018-09-01\", \"type\": \"Microsoft.Network/privateDnsZones/A\", \"name\": \"[concat(parameters('privateDNSZoneName'), '/api-int')]\", \"location\" : \"[variables('location')]\", \"dependsOn\" : [ \"[concat('Microsoft.Network/loadBalancers/', variables('internalLoadBalancerName'))]\" ], \"properties\": { \"ttl\": 60, \"aRecords\": [ { \"ipv4Address\": \"[reference(variables('internalLoadBalancerName')).frontendIPConfigurations[0].properties.privateIPAddress]\" } ] } } ] }", "bootstrap_url_expiry=`date -u -d \"10 hours\" '+%Y-%m-%dT%H:%MZ'`", "export BOOTSTRAP_URL=`az storage blob generate-sas -c 'files' -n 'bootstrap.ign' --https-only --full-uri --permissions r --expiry USDbootstrap_url_expiry --account-name USD{CLUSTER_NAME}sa --account-key USD{ACCOUNT_KEY} -o tsv`", "export BOOTSTRAP_IGNITION=`jq -rcnM --arg v \"3.2.0\" --arg url USD{BOOTSTRAP_URL} '{ignition:{version:USDv,config:{replace:{source:USDurl}}}}' | base64 | tr -d '\\n'`", "az deployment group create -g USD{RESOURCE_GROUP} --template-file \"<installation_directory>/04_bootstrap.json\" --parameters bootstrapIgnition=\"USD{BOOTSTRAP_IGNITION}\" \\ 1 --parameters baseName=\"USD{INFRA_ID}\" \\ 2 --parameter bootstrapVMSize=\"Standard_D4s_v3\" 3", "{ \"USDschema\" : \"https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#\", \"contentVersion\" : \"1.0.0.0\", \"parameters\" : { \"baseName\" : { \"type\" : \"string\", \"minLength\" : 1, \"metadata\" : { \"description\" : \"Base name to be used in resource names (usually the cluster's Infra ID)\" } }, \"vnetBaseName\": { \"type\": \"string\", \"defaultValue\": \"\", \"metadata\" : { \"description\" : \"The specific customer vnet's base name (optional)\" } }, \"bootstrapIgnition\" : { \"type\" : \"string\", \"minLength\" : 1, \"metadata\" : { \"description\" : \"Bootstrap ignition content for the bootstrap cluster\" } }, \"sshKeyData\" : { \"type\" : \"securestring\", \"defaultValue\" : \"Unused\", \"metadata\" : { \"description\" : \"Unused\" } }, \"bootstrapVMSize\" : { \"type\" : \"string\", \"defaultValue\" : \"Standard_D4s_v3\", \"metadata\" : { \"description\" : \"The size of the Bootstrap Virtual 
Machine\" } }, \"hyperVGen\": { \"type\": \"string\", \"metadata\": { \"description\": \"VM generation image to use\" }, \"defaultValue\": \"V2\", \"allowedValues\": [ \"V1\", \"V2\" ] } }, \"variables\" : { \"location\" : \"[resourceGroup().location]\", \"virtualNetworkName\" : \"[concat(if(not(empty(parameters('vnetBaseName'))), parameters('vnetBaseName'), parameters('baseName')), '-vnet')]\", \"virtualNetworkID\" : \"[resourceId('Microsoft.Network/virtualNetworks', variables('virtualNetworkName'))]\", \"masterSubnetName\" : \"[concat(if(not(empty(parameters('vnetBaseName'))), parameters('vnetBaseName'), parameters('baseName')), '-master-subnet')]\", \"masterSubnetRef\" : \"[concat(variables('virtualNetworkID'), '/subnets/', variables('masterSubnetName'))]\", \"masterLoadBalancerName\" : \"[parameters('baseName')]\", \"internalLoadBalancerName\" : \"[concat(parameters('baseName'), '-internal-lb')]\", \"sshKeyPath\" : \"/home/core/.ssh/authorized_keys\", \"identityName\" : \"[concat(parameters('baseName'), '-identity')]\", \"vmName\" : \"[concat(parameters('baseName'), '-bootstrap')]\", \"nicName\" : \"[concat(variables('vmName'), '-nic')]\", \"galleryName\": \"[concat('gallery_', replace(parameters('baseName'), '-', '_'))]\", \"imageName\" : \"[concat(parameters('baseName'), if(equals(parameters('hyperVGen'), 'V2'), '-gen2', ''))]\", \"clusterNsgName\" : \"[concat(if(not(empty(parameters('vnetBaseName'))), parameters('vnetBaseName'), parameters('baseName')), '-nsg')]\", \"sshPublicIpAddressName\" : \"[concat(variables('vmName'), '-ssh-pip')]\" }, \"resources\" : [ { \"apiVersion\" : \"2018-12-01\", \"type\" : \"Microsoft.Network/publicIPAddresses\", \"name\" : \"[variables('sshPublicIpAddressName')]\", \"location\" : \"[variables('location')]\", \"sku\": { \"name\": \"Standard\" }, \"properties\" : { \"publicIPAllocationMethod\" : \"Static\", \"dnsSettings\" : { \"domainNameLabel\" : \"[variables('sshPublicIpAddressName')]\" } } }, { \"apiVersion\" : \"2018-06-01\", \"type\" : \"Microsoft.Network/networkInterfaces\", \"name\" : \"[variables('nicName')]\", \"location\" : \"[variables('location')]\", \"dependsOn\" : [ \"[resourceId('Microsoft.Network/publicIPAddresses', variables('sshPublicIpAddressName'))]\" ], \"properties\" : { \"ipConfigurations\" : [ { \"name\" : \"pipConfig\", \"properties\" : { \"privateIPAllocationMethod\" : \"Dynamic\", \"publicIPAddress\": { \"id\": \"[resourceId('Microsoft.Network/publicIPAddresses', variables('sshPublicIpAddressName'))]\" }, \"subnet\" : { \"id\" : \"[variables('masterSubnetRef')]\" }, \"loadBalancerBackendAddressPools\" : [ { \"id\" : \"[concat('/subscriptions/', subscription().subscriptionId, '/resourceGroups/', resourceGroup().name, '/providers/Microsoft.Network/loadBalancers/', variables('masterLoadBalancerName'), '/backendAddressPools/', variables('masterLoadBalancerName'))]\" }, { \"id\" : \"[concat('/subscriptions/', subscription().subscriptionId, '/resourceGroups/', resourceGroup().name, '/providers/Microsoft.Network/loadBalancers/', variables('internalLoadBalancerName'), '/backendAddressPools/internal-lb-backend')]\" } ] } } ] } }, { \"apiVersion\" : \"2018-06-01\", \"type\" : \"Microsoft.Compute/virtualMachines\", \"name\" : \"[variables('vmName')]\", \"location\" : \"[variables('location')]\", \"identity\" : { \"type\" : \"userAssigned\", \"userAssignedIdentities\" : { \"[resourceID('Microsoft.ManagedIdentity/userAssignedIdentities/', variables('identityName'))]\" : {} } }, \"dependsOn\" : [ 
\"[concat('Microsoft.Network/networkInterfaces/', variables('nicName'))]\" ], \"properties\" : { \"hardwareProfile\" : { \"vmSize\" : \"[parameters('bootstrapVMSize')]\" }, \"osProfile\" : { \"computerName\" : \"[variables('vmName')]\", \"adminUsername\" : \"core\", \"adminPassword\" : \"NotActuallyApplied!\", \"customData\" : \"[parameters('bootstrapIgnition')]\", \"linuxConfiguration\" : { \"disablePasswordAuthentication\" : false } }, \"storageProfile\" : { \"imageReference\": { \"id\": \"[resourceId('Microsoft.Compute/galleries/images', variables('galleryName'), variables('imageName'))]\" }, \"osDisk\" : { \"name\": \"[concat(variables('vmName'),'_OSDisk')]\", \"osType\" : \"Linux\", \"createOption\" : \"FromImage\", \"managedDisk\": { \"storageAccountType\": \"Premium_LRS\" }, \"diskSizeGB\" : 100 } }, \"networkProfile\" : { \"networkInterfaces\" : [ { \"id\" : \"[resourceId('Microsoft.Network/networkInterfaces', variables('nicName'))]\" } ] } } }, { \"apiVersion\" : \"2018-06-01\", \"type\": \"Microsoft.Network/networkSecurityGroups/securityRules\", \"name\" : \"[concat(variables('clusterNsgName'), '/bootstrap_ssh_in')]\", \"location\" : \"[variables('location')]\", \"dependsOn\" : [ \"[resourceId('Microsoft.Compute/virtualMachines', variables('vmName'))]\" ], \"properties\": { \"protocol\" : \"Tcp\", \"sourcePortRange\" : \"*\", \"destinationPortRange\" : \"22\", \"sourceAddressPrefix\" : \"*\", \"destinationAddressPrefix\" : \"*\", \"access\" : \"Allow\", \"priority\" : 100, \"direction\" : \"Inbound\" } } ] }", "export MASTER_IGNITION=`cat <installation_directory>/master.ign | base64 | tr -d '\\n'`", "az deployment group create -g USD{RESOURCE_GROUP} --template-file \"<installation_directory>/05_masters.json\" --parameters masterIgnition=\"USD{MASTER_IGNITION}\" \\ 1 --parameters baseName=\"USD{INFRA_ID}\" \\ 2 --parameters masterVMSize=\"Standard_D8s_v3\" 3", "{ \"USDschema\" : \"https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#\", \"contentVersion\" : \"1.0.0.0\", \"parameters\" : { \"baseName\" : { \"type\" : \"string\", \"minLength\" : 1, \"metadata\" : { \"description\" : \"Base name to be used in resource names (usually the cluster's Infra ID)\" } }, \"vnetBaseName\": { \"type\": \"string\", \"defaultValue\": \"\", \"metadata\" : { \"description\" : \"The specific customer vnet's base name (optional)\" } }, \"masterIgnition\" : { \"type\" : \"string\", \"metadata\" : { \"description\" : \"Ignition content for the master nodes\" } }, \"numberOfMasters\" : { \"type\" : \"int\", \"defaultValue\" : 3, \"minValue\" : 2, \"maxValue\" : 30, \"metadata\" : { \"description\" : \"Number of OpenShift masters to deploy\" } }, \"sshKeyData\" : { \"type\" : \"securestring\", \"defaultValue\" : \"Unused\", \"metadata\" : { \"description\" : \"Unused\" } }, \"privateDNSZoneName\" : { \"type\" : \"string\", \"defaultValue\" : \"\", \"metadata\" : { \"description\" : \"unused\" } }, \"masterVMSize\" : { \"type\" : \"string\", \"defaultValue\" : \"Standard_D8s_v3\", \"metadata\" : { \"description\" : \"The size of the Master Virtual Machines\" } }, \"diskSizeGB\" : { \"type\" : \"int\", \"defaultValue\" : 1024, \"metadata\" : { \"description\" : \"Size of the Master VM OS disk, in GB\" } }, \"hyperVGen\": { \"type\": \"string\", \"metadata\": { \"description\": \"VM generation image to use\" }, \"defaultValue\": \"V2\", \"allowedValues\": [ \"V1\", \"V2\" ] } }, \"variables\" : { \"location\" : \"[resourceGroup().location]\", \"virtualNetworkName\" : 
\"[concat(if(not(empty(parameters('vnetBaseName'))), parameters('vnetBaseName'), parameters('baseName')), '-vnet')]\", \"virtualNetworkID\" : \"[resourceId('Microsoft.Network/virtualNetworks', variables('virtualNetworkName'))]\", \"masterSubnetName\" : \"[concat(if(not(empty(parameters('vnetBaseName'))), parameters('vnetBaseName'), parameters('baseName')), '-master-subnet')]\", \"masterSubnetRef\" : \"[concat(variables('virtualNetworkID'), '/subnets/', variables('masterSubnetName'))]\", \"masterLoadBalancerName\" : \"[parameters('baseName')]\", \"internalLoadBalancerName\" : \"[concat(parameters('baseName'), '-internal-lb')]\", \"sshKeyPath\" : \"/home/core/.ssh/authorized_keys\", \"identityName\" : \"[concat(parameters('baseName'), '-identity')]\", \"galleryName\": \"[concat('gallery_', replace(parameters('baseName'), '-', '_'))]\", \"imageName\" : \"[concat(parameters('baseName'), if(equals(parameters('hyperVGen'), 'V2'), '-gen2', ''))]\", \"copy\" : [ { \"name\" : \"vmNames\", \"count\" : \"[parameters('numberOfMasters')]\", \"input\" : \"[concat(parameters('baseName'), '-master-', copyIndex('vmNames'))]\" } ] }, \"resources\" : [ { \"apiVersion\" : \"2018-06-01\", \"type\" : \"Microsoft.Network/networkInterfaces\", \"copy\" : { \"name\" : \"nicCopy\", \"count\" : \"[length(variables('vmNames'))]\" }, \"name\" : \"[concat(variables('vmNames')[copyIndex()], '-nic')]\", \"location\" : \"[variables('location')]\", \"properties\" : { \"ipConfigurations\" : [ { \"name\" : \"pipConfig\", \"properties\" : { \"privateIPAllocationMethod\" : \"Dynamic\", \"subnet\" : { \"id\" : \"[variables('masterSubnetRef')]\" }, \"loadBalancerBackendAddressPools\" : [ { \"id\" : \"[concat('/subscriptions/', subscription().subscriptionId, '/resourceGroups/', resourceGroup().name, '/providers/Microsoft.Network/loadBalancers/', variables('masterLoadBalancerName'), '/backendAddressPools/', variables('masterLoadBalancerName'))]\" }, { \"id\" : \"[concat('/subscriptions/', subscription().subscriptionId, '/resourceGroups/', resourceGroup().name, '/providers/Microsoft.Network/loadBalancers/', variables('internalLoadBalancerName'), '/backendAddressPools/internal-lb-backend')]\" } ] } } ] } }, { \"apiVersion\" : \"2018-06-01\", \"type\" : \"Microsoft.Compute/virtualMachines\", \"copy\" : { \"name\" : \"vmCopy\", \"count\" : \"[length(variables('vmNames'))]\" }, \"name\" : \"[variables('vmNames')[copyIndex()]]\", \"location\" : \"[variables('location')]\", \"identity\" : { \"type\" : \"userAssigned\", \"userAssignedIdentities\" : { \"[resourceID('Microsoft.ManagedIdentity/userAssignedIdentities/', variables('identityName'))]\" : {} } }, \"dependsOn\" : [ \"[concat('Microsoft.Network/networkInterfaces/', concat(variables('vmNames')[copyIndex()], '-nic'))]\" ], \"properties\" : { \"hardwareProfile\" : { \"vmSize\" : \"[parameters('masterVMSize')]\" }, \"osProfile\" : { \"computerName\" : \"[variables('vmNames')[copyIndex()]]\", \"adminUsername\" : \"core\", \"adminPassword\" : \"NotActuallyApplied!\", \"customData\" : \"[parameters('masterIgnition')]\", \"linuxConfiguration\" : { \"disablePasswordAuthentication\" : false } }, \"storageProfile\" : { \"imageReference\": { \"id\": \"[resourceId('Microsoft.Compute/galleries/images', variables('galleryName'), variables('imageName'))]\" }, \"osDisk\" : { \"name\": \"[concat(variables('vmNames')[copyIndex()], '_OSDisk')]\", \"osType\" : \"Linux\", \"createOption\" : \"FromImage\", \"caching\": \"ReadOnly\", \"writeAcceleratorEnabled\": false, \"managedDisk\": { 
\"storageAccountType\": \"Premium_LRS\" }, \"diskSizeGB\" : \"[parameters('diskSizeGB')]\" } }, \"networkProfile\" : { \"networkInterfaces\" : [ { \"id\" : \"[resourceId('Microsoft.Network/networkInterfaces', concat(variables('vmNames')[copyIndex()], '-nic'))]\", \"properties\": { \"primary\": false } } ] } } } ] }", "./openshift-install wait-for bootstrap-complete --dir <installation_directory> \\ 1 --log-level info 2", "az network nsg rule delete -g USD{RESOURCE_GROUP} --nsg-name USD{INFRA_ID}-nsg --name bootstrap_ssh_in az vm stop -g USD{RESOURCE_GROUP} --name USD{INFRA_ID}-bootstrap az vm deallocate -g USD{RESOURCE_GROUP} --name USD{INFRA_ID}-bootstrap az vm delete -g USD{RESOURCE_GROUP} --name USD{INFRA_ID}-bootstrap --yes az disk delete -g USD{RESOURCE_GROUP} --name USD{INFRA_ID}-bootstrap_OSDisk --no-wait --yes az network nic delete -g USD{RESOURCE_GROUP} --name USD{INFRA_ID}-bootstrap-nic --no-wait az storage blob delete --account-key USD{ACCOUNT_KEY} --account-name USD{CLUSTER_NAME}sa --container-name files --name bootstrap.ign az network public-ip delete -g USD{RESOURCE_GROUP} --name USD{INFRA_ID}-bootstrap-ssh-pip", "export WORKER_IGNITION=`cat <installation_directory>/worker.ign | base64 | tr -d '\\n'`", "az deployment group create -g USD{RESOURCE_GROUP} --template-file \"<installation_directory>/06_workers.json\" --parameters workerIgnition=\"USD{WORKER_IGNITION}\" \\ 1 --parameters baseName=\"USD{INFRA_ID}\" \\ 2 --parameters nodeVMSize=\"Standard_D4s_v3\" 3", "{ \"USDschema\" : \"https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#\", \"contentVersion\" : \"1.0.0.0\", \"parameters\" : { \"baseName\" : { \"type\" : \"string\", \"minLength\" : 1, \"metadata\" : { \"description\" : \"Base name to be used in resource names (usually the cluster's Infra ID)\" } }, \"vnetBaseName\": { \"type\": \"string\", \"defaultValue\": \"\", \"metadata\" : { \"description\" : \"The specific customer vnet's base name (optional)\" } }, \"workerIgnition\" : { \"type\" : \"string\", \"metadata\" : { \"description\" : \"Ignition content for the worker nodes\" } }, \"numberOfNodes\" : { \"type\" : \"int\", \"defaultValue\" : 3, \"minValue\" : 2, \"maxValue\" : 30, \"metadata\" : { \"description\" : \"Number of OpenShift compute nodes to deploy\" } }, \"sshKeyData\" : { \"type\" : \"securestring\", \"defaultValue\" : \"Unused\", \"metadata\" : { \"description\" : \"Unused\" } }, \"nodeVMSize\" : { \"type\" : \"string\", \"defaultValue\" : \"Standard_D4s_v3\", \"metadata\" : { \"description\" : \"The size of the each Node Virtual Machine\" } }, \"hyperVGen\": { \"type\": \"string\", \"metadata\": { \"description\": \"VM generation image to use\" }, \"defaultValue\": \"V2\", \"allowedValues\": [ \"V1\", \"V2\" ] } }, \"variables\" : { \"location\" : \"[resourceGroup().location]\", \"virtualNetworkName\" : \"[concat(if(not(empty(parameters('vnetBaseName'))), parameters('vnetBaseName'), parameters('baseName')), '-vnet')]\", \"virtualNetworkID\" : \"[resourceId('Microsoft.Network/virtualNetworks', variables('virtualNetworkName'))]\", \"nodeSubnetName\" : \"[concat(if(not(empty(parameters('vnetBaseName'))), parameters('vnetBaseName'), parameters('baseName')), '-worker-subnet')]\", \"nodeSubnetRef\" : \"[concat(variables('virtualNetworkID'), '/subnets/', variables('nodeSubnetName'))]\", \"infraLoadBalancerName\" : \"[parameters('baseName')]\", \"sshKeyPath\" : \"/home/capi/.ssh/authorized_keys\", \"identityName\" : \"[concat(parameters('baseName'), '-identity')]\", 
\"galleryName\": \"[concat('gallery_', replace(parameters('baseName'), '-', '_'))]\", \"imageName\" : \"[concat(parameters('baseName'), if(equals(parameters('hyperVGen'), 'V2'), '-gen2', ''))]\", \"copy\" : [ { \"name\" : \"vmNames\", \"count\" : \"[parameters('numberOfNodes')]\", \"input\" : \"[concat(parameters('baseName'), '-worker-', variables('location'), '-', copyIndex('vmNames', 1))]\" } ] }, \"resources\" : [ { \"apiVersion\" : \"2019-05-01\", \"name\" : \"[concat('node', copyIndex())]\", \"type\" : \"Microsoft.Resources/deployments\", \"copy\" : { \"name\" : \"nodeCopy\", \"count\" : \"[length(variables('vmNames'))]\" }, \"properties\" : { \"mode\" : \"Incremental\", \"template\" : { \"USDschema\" : \"http://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#\", \"contentVersion\" : \"1.0.0.0\", \"resources\" : [ { \"apiVersion\" : \"2018-06-01\", \"type\" : \"Microsoft.Network/networkInterfaces\", \"name\" : \"[concat(variables('vmNames')[copyIndex()], '-nic')]\", \"location\" : \"[variables('location')]\", \"properties\" : { \"ipConfigurations\" : [ { \"name\" : \"pipConfig\", \"properties\" : { \"privateIPAllocationMethod\" : \"Dynamic\", \"subnet\" : { \"id\" : \"[variables('nodeSubnetRef')]\" } } } ] } }, { \"apiVersion\" : \"2018-06-01\", \"type\" : \"Microsoft.Compute/virtualMachines\", \"name\" : \"[variables('vmNames')[copyIndex()]]\", \"location\" : \"[variables('location')]\", \"tags\" : { \"kubernetes.io-cluster-ffranzupi\": \"owned\" }, \"identity\" : { \"type\" : \"userAssigned\", \"userAssignedIdentities\" : { \"[resourceID('Microsoft.ManagedIdentity/userAssignedIdentities/', variables('identityName'))]\" : {} } }, \"dependsOn\" : [ \"[concat('Microsoft.Network/networkInterfaces/', concat(variables('vmNames')[copyIndex()], '-nic'))]\" ], \"properties\" : { \"hardwareProfile\" : { \"vmSize\" : \"[parameters('nodeVMSize')]\" }, \"osProfile\" : { \"computerName\" : \"[variables('vmNames')[copyIndex()]]\", \"adminUsername\" : \"capi\", \"adminPassword\" : \"NotActuallyApplied!\", \"customData\" : \"[parameters('workerIgnition')]\", \"linuxConfiguration\" : { \"disablePasswordAuthentication\" : false } }, \"storageProfile\" : { \"imageReference\": { \"id\": \"[resourceId('Microsoft.Compute/galleries/images', variables('galleryName'), variables('imageName'))]\" }, \"osDisk\" : { \"name\": \"[concat(variables('vmNames')[copyIndex()],'_OSDisk')]\", \"osType\" : \"Linux\", \"createOption\" : \"FromImage\", \"managedDisk\": { \"storageAccountType\": \"Premium_LRS\" }, \"diskSizeGB\": 128 } }, \"networkProfile\" : { \"networkInterfaces\" : [ { \"id\" : \"[resourceId('Microsoft.Network/networkInterfaces', concat(variables('vmNames')[copyIndex()], '-nic'))]\", \"properties\": { \"primary\": true } } ] } } } ] } } } ] }", "tar xvf <file>", "echo USDPATH", "oc <command>", "C:\\> path", "C:\\> oc <command>", "echo USDPATH", "oc <command>", "export KUBECONFIG=<installation_directory>/auth/kubeconfig 1", "oc whoami", "system:admin", "oc get nodes", "NAME STATUS ROLES AGE VERSION master-0 Ready master 63m v1.30.3 master-1 Ready master 63m v1.30.3 master-2 Ready master 64m v1.30.3", "oc get csr", "NAME AGE REQUESTOR CONDITION csr-8b2br 15m system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Pending csr-8vnps 15m system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Pending", "oc adm certificate approve <csr_name> 1", "oc get csr -o go-template='{{range .items}}{{if not .status}}{{.metadata.name}}{{\"\\n\"}}{{end}}{{end}}' | 
xargs --no-run-if-empty oc adm certificate approve", "oc get csr", "NAME AGE REQUESTOR CONDITION csr-bfd72 5m26s system:node:ip-10-0-50-126.us-east-2.compute.internal Pending csr-c57lv 5m26s system:node:ip-10-0-95-157.us-east-2.compute.internal Pending", "oc adm certificate approve <csr_name> 1", "oc get csr -o go-template='{{range .items}}{{if not .status}}{{.metadata.name}}{{\"\\n\"}}{{end}}{{end}}' | xargs oc adm certificate approve", "oc get nodes", "NAME STATUS ROLES AGE VERSION master-0 Ready master 73m v1.30.3 master-1 Ready master 73m v1.30.3 master-2 Ready master 74m v1.30.3 worker-0 Ready worker 11m v1.30.3 worker-1 Ready worker 11m v1.30.3", "oc -n openshift-ingress get service router-default", "NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE router-default LoadBalancer 172.30.20.10 35.130.120.110 80:32288/TCP,443:31215/TCP 20", "export PUBLIC_IP_ROUTER=`oc -n openshift-ingress get service router-default --no-headers | awk '{print USD4}'`", "az network dns record-set a add-record -g USD{BASE_DOMAIN_RESOURCE_GROUP} -z USD{CLUSTER_NAME}.USD{BASE_DOMAIN} -n *.apps -a USD{PUBLIC_IP_ROUTER} --ttl 300", "az network dns record-set a add-record -g USD{BASE_DOMAIN_RESOURCE_GROUP} -z USD{BASE_DOMAIN} -n *.apps.USD{CLUSTER_NAME} -a USD{PUBLIC_IP_ROUTER} --ttl 300", "az network private-dns record-set a create -g USD{RESOURCE_GROUP} -z USD{CLUSTER_NAME}.USD{BASE_DOMAIN} -n *.apps --ttl 300", "az network private-dns record-set a add-record -g USD{RESOURCE_GROUP} -z USD{CLUSTER_NAME}.USD{BASE_DOMAIN} -n *.apps -a USD{PUBLIC_IP_ROUTER}", "oc get --all-namespaces -o jsonpath='{range .items[*]}{range .status.ingress[*]}{.host}{\"\\n\"}{end}{end}' routes", "oauth-openshift.apps.cluster.basedomain.com console-openshift-console.apps.cluster.basedomain.com downloads-openshift-console.apps.cluster.basedomain.com alertmanager-main-openshift-monitoring.apps.cluster.basedomain.com prometheus-k8s-openshift-monitoring.apps.cluster.basedomain.com", "./openshift-install --dir <installation_directory> wait-for install-complete 1", "INFO Waiting up to 30m0s for the cluster to initialize", "az login", "az account list --refresh", "[ { \"cloudName\": \"AzureCloud\", \"id\": \"8xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\", \"isDefault\": true, \"name\": \"Subscription Name 1\", \"state\": \"Enabled\", \"tenantId\": \"6xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\", \"user\": { \"name\": \"[email protected]\", \"type\": \"user\" } }, { \"cloudName\": \"AzureCloud\", \"id\": \"9xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\", \"isDefault\": false, \"name\": \"Subscription Name 2\", \"state\": \"Enabled\", \"tenantId\": \"7xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\", \"user\": { \"name\": \"[email protected]\", \"type\": \"user\" } } ]", "az account show", "{ \"environmentName\": \"AzureCloud\", \"id\": \"8xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\", \"isDefault\": true, \"name\": \"Subscription Name 1\", \"state\": \"Enabled\", \"tenantId\": \"6xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\", \"user\": { \"name\": \"[email protected]\", \"type\": \"user\" } }", "az account set -s <subscription_id>", "az account show", "{ \"environmentName\": \"AzureCloud\", \"id\": \"9xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\", \"isDefault\": true, \"name\": \"Subscription Name 2\", \"state\": \"Enabled\", \"tenantId\": \"7xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\", \"user\": { \"name\": \"[email protected]\", \"type\": \"user\" } }", "az ad sp create-for-rbac --role <role_name> \\ 1 --name <service_principal> \\ 2 --scopes /subscriptions/<subscription_id> 3", "Creating 'Contributor' role 
assignment under scope '/subscriptions/<subscription_id>' The output includes credentials that you must protect. Be sure that you do not include these credentials in your code or check the credentials into your source control. For more information, see https://aka.ms/azadsp-cli { \"appId\": \"axxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\", \"displayName\": \"<service_principal>\", \"password\": \"00000000-0000-0000-0000-000000000000\", \"tenantId\": \"8xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\" }", "az role assignment create --role \"User Access Administrator\" --assignee-object-id USD(az ad sp show --id <appId> --query id -o tsv) 1 --scope /subscriptions/<subscription_id> 2", "az vm image list --all --offer rh-ocp-worker --publisher redhat -o table", "Offer Publisher Sku Urn Version ------------- -------------- ------------------ -------------------------------------------------------------- ----------------- rh-ocp-worker RedHat rh-ocp-worker RedHat:rh-ocp-worker:rh-ocp-worker:4.15.2024072409 4.15.2024072409 rh-ocp-worker RedHat rh-ocp-worker-gen1 RedHat:rh-ocp-worker:rh-ocp-worker-gen1:4.15.2024072409 4.15.2024072409", "az vm image list --all --offer rh-ocp-worker --publisher redhat-limited -o table", "Offer Publisher Sku Urn Version ------------- -------------- ------------------ -------------------------------------------------------------- ----------------- rh-ocp-worker redhat-limited rh-ocp-worker redhat-limited:rh-ocp-worker:rh-ocp-worker:4.15.2024072409 4.15.2024072409 rh-ocp-worker redhat-limited rh-ocp-worker-gen1 redhat-limited:rh-ocp-worker:rh-ocp-worker-gen1:4.15.2024072409 4.15.2024072409", "az vm image show --urn redhat:rh-ocp-worker:rh-ocp-worker:<version>", "az vm image show --urn redhat-limited:rh-ocp-worker:rh-ocp-worker:<version>", "az vm image terms show --urn redhat:rh-ocp-worker:rh-ocp-worker:<version>", "az vm image terms show --urn redhat-limited:rh-ocp-worker:rh-ocp-worker:<version>", "az vm image terms accept --urn redhat:rh-ocp-worker:rh-ocp-worker:<version>", "az vm image terms accept --urn redhat-limited:rh-ocp-worker:rh-ocp-worker:<version>", "\"plan\" : { \"name\": \"rh-ocp-worker\", \"product\": \"rh-ocp-worker\", \"publisher\": \"redhat\" }, \"dependsOn\" : [ \"[concat('Microsoft.Network/networkInterfaces/', concat(variables('vmNames')[copyIndex()], '-nic'))]\" ], \"properties\" : { \"storageProfile\": { \"imageReference\": { \"offer\": \"rh-ocp-worker\", \"publisher\": \"redhat\", \"sku\": \"rh-ocp-worker\", \"version\": \"413.92.2023101700\" } } }", "tar -xvf openshift-install-linux.tar.gz", "ssh-keygen -t ed25519 -N '' -f <path>/<file_name> 1", "cat <path>/<file_name>.pub", "cat ~/.ssh/id_ed25519.pub", "eval \"USD(ssh-agent -s)\"", "Agent pid 31874", "ssh-add <path>/<file_name> 1", "Identity added: /home/<you>/<path>/<file_name> (<computer_name>)", "mkdir USDHOME/clusterconfig", "openshift-install create manifests --dir USDHOME/clusterconfig", "?
SSH Public Key INFO Credentials loaded from the \"myprofile\" profile in file \"/home/myuser/.aws/credentials\" INFO Consuming Install Config from target directory INFO Manifests created in: USDHOME/clusterconfig/manifests and USDHOME/clusterconfig/openshift", "ls USDHOME/clusterconfig/openshift/", "99_kubeadmin-password-secret.yaml 99_openshift-cluster-api_master-machines-0.yaml 99_openshift-cluster-api_master-machines-1.yaml 99_openshift-cluster-api_master-machines-2.yaml", "variant: openshift version: 4.17.0 metadata: labels: machineconfiguration.openshift.io/role: worker name: 98-var-partition storage: disks: - device: /dev/disk/by-id/<device_name> 1 partitions: - label: var start_mib: <partition_start_offset> 2 size_mib: <partition_size> 3 number: 5 filesystems: - device: /dev/disk/by-partlabel/var path: /var format: xfs mount_options: [defaults, prjquota] 4 with_mount_unit: true", "butane USDHOME/clusterconfig/98-var-partition.bu -o USDHOME/clusterconfig/openshift/98-var-partition.yaml", "openshift-install create ignition-configs --dir USDHOME/clusterconfig ls USDHOME/clusterconfig/ auth bootstrap.ign master.ign metadata.json worker.ign", "./openshift-install create install-config --dir <installation_directory> 1", "apiVersion: v1 baseDomain: my.domain.com proxy: httpProxy: http://<username>:<pswd>@<ip>:<port> 1 httpsProxy: https://<username>:<pswd>@<ip>:<port> 2 noProxy: example.com 3 additionalTrustBundle: | 4 -----BEGIN CERTIFICATE----- <MY_TRUSTED_CA_CERT> -----END CERTIFICATE----- additionalTrustBundlePolicy: <policy_to_add_additionalTrustBundle> 5", "./openshift-install wait-for install-complete --log-level debug", "export CLUSTER_NAME=<cluster_name> 1 export AZURE_REGION=<azure_region> 2 export SSH_KEY=<ssh_key> 3 export BASE_DOMAIN=<base_domain> 4 export BASE_DOMAIN_RESOURCE_GROUP=<base_domain_resource_group> 5", "export CLUSTER_NAME=test-cluster export AZURE_REGION=centralus export SSH_KEY=\"ssh-rsa xxx/xxx/xxx= [email protected]\" export BASE_DOMAIN=example.com export BASE_DOMAIN_RESOURCE_GROUP=ocp-cluster", "export KUBECONFIG=<installation_directory>/auth/kubeconfig 1", "./openshift-install create manifests --dir <installation_directory> 1", "rm -f <installation_directory>/openshift/99_openshift-cluster-api_master-machines-*.yaml", "rm -f <installation_directory>/openshift/99_openshift-machine-api_master-control-plane-machine-set.yaml", "rm -f <installation_directory>/openshift/99_openshift-cluster-api_worker-machineset-*.yaml", "apiVersion: config.openshift.io/v1 kind: DNS metadata: creationTimestamp: null name: cluster spec: baseDomain: example.openshift.com privateZone: 1 id: mycluster-100419-private-zone publicZone: 2 id: example.openshift.com status: {}", "export INFRA_ID=<infra_id> 1", "export RESOURCE_GROUP=<resource_group> 1", "./openshift-install create ignition-configs --dir <installation_directory> 1", ". 
├── auth │ ├── kubeadmin-password │ └── kubeconfig ├── bootstrap.ign ├── master.ign ├── metadata.json └── worker.ign", "az group create --name USD{RESOURCE_GROUP} --location USD{AZURE_REGION}", "az identity create -g USD{RESOURCE_GROUP} -n USD{INFRA_ID}-identity", "export PRINCIPAL_ID=`az identity show -g USD{RESOURCE_GROUP} -n USD{INFRA_ID}-identity --query principalId --out tsv`", "export RESOURCE_GROUP_ID=`az group show -g USD{RESOURCE_GROUP} --query id --out tsv`", "az role assignment create --assignee \"USD{PRINCIPAL_ID}\" --role 'Contributor' --scope \"USD{RESOURCE_GROUP_ID}\"", "az role assignment create --assignee \"USD{PRINCIPAL_ID}\" --role <custom_role> \\ 1 --scope \"USD{RESOURCE_GROUP_ID}\"", "az storage account create -g USD{RESOURCE_GROUP} --location USD{AZURE_REGION} --name USD{CLUSTER_NAME}sa --kind Storage --sku Standard_LRS", "export ACCOUNT_KEY=`az storage account keys list -g USD{RESOURCE_GROUP} --account-name USD{CLUSTER_NAME}sa --query \"[0].value\" -o tsv`", "export VHD_URL=`openshift-install coreos print-stream-json | jq -r '.architectures.<architecture>.\"rhel-coreos-extensions\".\"azure-disk\".url'`", "az storage container create --name vhd --account-name USD{CLUSTER_NAME}sa --account-key USD{ACCOUNT_KEY}", "az storage blob copy start --account-name USD{CLUSTER_NAME}sa --account-key USD{ACCOUNT_KEY} --destination-blob \"rhcos.vhd\" --destination-container vhd --source-uri \"USD{VHD_URL}\"", "az storage container create --name files --account-name USD{CLUSTER_NAME}sa --account-key USD{ACCOUNT_KEY}", "az storage blob upload --account-name USD{CLUSTER_NAME}sa --account-key USD{ACCOUNT_KEY} -c \"files\" -f \"<installation_directory>/bootstrap.ign\" -n \"bootstrap.ign\"", "az network dns zone create -g USD{BASE_DOMAIN_RESOURCE_GROUP} -n USD{CLUSTER_NAME}.USD{BASE_DOMAIN}", "az network private-dns zone create -g USD{RESOURCE_GROUP} -n USD{CLUSTER_NAME}.USD{BASE_DOMAIN}", "az deployment group create -g USD{RESOURCE_GROUP} --template-file \"<installation_directory>/01_vnet.json\" --parameters baseName=\"USD{INFRA_ID}\" 1", "az network private-dns link vnet create -g USD{RESOURCE_GROUP} -z USD{CLUSTER_NAME}.USD{BASE_DOMAIN} -n USD{INFRA_ID}-network-link -v \"USD{INFRA_ID}-vnet\" -e false", "{ \"USDschema\" : \"https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#\", \"contentVersion\" : \"1.0.0.0\", \"parameters\" : { \"baseName\" : { \"type\" : \"string\", \"minLength\" : 1, \"metadata\" : { \"description\" : \"Base name to be used in resource names (usually the cluster's Infra ID)\" } } }, \"variables\" : { \"location\" : \"[resourceGroup().location]\", \"virtualNetworkName\" : \"[concat(parameters('baseName'), '-vnet')]\", \"addressPrefix\" : \"10.0.0.0/16\", \"masterSubnetName\" : \"[concat(parameters('baseName'), '-master-subnet')]\", \"masterSubnetPrefix\" : \"10.0.0.0/24\", \"nodeSubnetName\" : \"[concat(parameters('baseName'), '-worker-subnet')]\", \"nodeSubnetPrefix\" : \"10.0.1.0/24\", \"clusterNsgName\" : \"[concat(parameters('baseName'), '-nsg')]\" }, \"resources\" : [ { \"apiVersion\" : \"2018-12-01\", \"type\" : \"Microsoft.Network/virtualNetworks\", \"name\" : \"[variables('virtualNetworkName')]\", \"location\" : \"[variables('location')]\", \"dependsOn\" : [ \"[concat('Microsoft.Network/networkSecurityGroups/', variables('clusterNsgName'))]\" ], \"properties\" : { \"addressSpace\" : { \"addressPrefixes\" : [ \"[variables('addressPrefix')]\" ] }, \"subnets\" : [ { \"name\" : \"[variables('masterSubnetName')]\", \"properties\" : { 
\"addressPrefix\" : \"[variables('masterSubnetPrefix')]\", \"serviceEndpoints\": [], \"networkSecurityGroup\" : { \"id\" : \"[resourceId('Microsoft.Network/networkSecurityGroups', variables('clusterNsgName'))]\" } } }, { \"name\" : \"[variables('nodeSubnetName')]\", \"properties\" : { \"addressPrefix\" : \"[variables('nodeSubnetPrefix')]\", \"serviceEndpoints\": [], \"networkSecurityGroup\" : { \"id\" : \"[resourceId('Microsoft.Network/networkSecurityGroups', variables('clusterNsgName'))]\" } } } ] } }, { \"type\" : \"Microsoft.Network/networkSecurityGroups\", \"name\" : \"[variables('clusterNsgName')]\", \"apiVersion\" : \"2018-10-01\", \"location\" : \"[variables('location')]\", \"properties\" : { \"securityRules\" : [ { \"name\" : \"apiserver_in\", \"properties\" : { \"protocol\" : \"Tcp\", \"sourcePortRange\" : \"*\", \"destinationPortRange\" : \"6443\", \"sourceAddressPrefix\" : \"*\", \"destinationAddressPrefix\" : \"*\", \"access\" : \"Allow\", \"priority\" : 101, \"direction\" : \"Inbound\" } } ] } } ] }", "export VHD_BLOB_URL=`az storage blob url --account-name USD{CLUSTER_NAME}sa --account-key USD{ACCOUNT_KEY} -c vhd -n \"rhcos.vhd\" -o tsv`", "az deployment group create -g USD{RESOURCE_GROUP} --template-file \"<installation_directory>/02_storage.json\" --parameters vhdBlobURL=\"USD{VHD_BLOB_URL}\" \\ 1 --parameters baseName=\"USD{INFRA_ID}\" \\ 2 --parameters storageAccount=\"USD{CLUSTER_NAME}sa\" \\ 3 --parameters architecture=\"<architecture>\" 4", "{ \"USDschema\": \"https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#\", \"contentVersion\": \"1.0.0.0\", \"parameters\": { \"architecture\": { \"type\": \"string\", \"metadata\": { \"description\": \"The architecture of the Virtual Machines\" }, \"defaultValue\": \"x64\", \"allowedValues\": [ \"Arm64\", \"x64\" ] }, \"baseName\": { \"type\": \"string\", \"minLength\": 1, \"metadata\": { \"description\": \"Base name to be used in resource names (usually the cluster's Infra ID)\" } }, \"storageAccount\": { \"type\": \"string\", \"metadata\": { \"description\": \"The Storage Account name\" } }, \"vhdBlobURL\": { \"type\": \"string\", \"metadata\": { \"description\": \"URL pointing to the blob where the VHD to be used to create master and worker machines is located\" } } }, \"variables\": { \"location\": \"[resourceGroup().location]\", \"galleryName\": \"[concat('gallery_', replace(parameters('baseName'), '-', '_'))]\", \"imageName\": \"[parameters('baseName')]\", \"imageNameGen2\": \"[concat(parameters('baseName'), '-gen2')]\", \"imageRelease\": \"1.0.0\" }, \"resources\": [ { \"apiVersion\": \"2021-10-01\", \"type\": \"Microsoft.Compute/galleries\", \"name\": \"[variables('galleryName')]\", \"location\": \"[variables('location')]\", \"resources\": [ { \"apiVersion\": \"2021-10-01\", \"type\": \"images\", \"name\": \"[variables('imageName')]\", \"location\": \"[variables('location')]\", \"dependsOn\": [ \"[variables('galleryName')]\" ], \"properties\": { \"architecture\": \"[parameters('architecture')]\", \"hyperVGeneration\": \"V1\", \"identifier\": { \"offer\": \"rhcos\", \"publisher\": \"RedHat\", \"sku\": \"basic\" }, \"osState\": \"Generalized\", \"osType\": \"Linux\" }, \"resources\": [ { \"apiVersion\": \"2021-10-01\", \"type\": \"versions\", \"name\": \"[variables('imageRelease')]\", \"location\": \"[variables('location')]\", \"dependsOn\": [ \"[variables('imageName')]\" ], \"properties\": { \"publishingProfile\": { \"storageAccountType\": \"Standard_LRS\", \"targetRegions\": [ { \"name\": 
\"[variables('location')]\", \"regionalReplicaCount\": \"1\" } ] }, \"storageProfile\": { \"osDiskImage\": { \"source\": { \"id\": \"[resourceId('Microsoft.Storage/storageAccounts', parameters('storageAccount'))]\", \"uri\": \"[parameters('vhdBlobURL')]\" } } } } } ] }, { \"apiVersion\": \"2021-10-01\", \"type\": \"images\", \"name\": \"[variables('imageNameGen2')]\", \"location\": \"[variables('location')]\", \"dependsOn\": [ \"[variables('galleryName')]\" ], \"properties\": { \"architecture\": \"[parameters('architecture')]\", \"hyperVGeneration\": \"V2\", \"identifier\": { \"offer\": \"rhcos-gen2\", \"publisher\": \"RedHat-gen2\", \"sku\": \"gen2\" }, \"osState\": \"Generalized\", \"osType\": \"Linux\" }, \"resources\": [ { \"apiVersion\": \"2021-10-01\", \"type\": \"versions\", \"name\": \"[variables('imageRelease')]\", \"location\": \"[variables('location')]\", \"dependsOn\": [ \"[variables('imageNameGen2')]\" ], \"properties\": { \"publishingProfile\": { \"storageAccountType\": \"Standard_LRS\", \"targetRegions\": [ { \"name\": \"[variables('location')]\", \"regionalReplicaCount\": \"1\" } ] }, \"storageProfile\": { \"osDiskImage\": { \"source\": { \"id\": \"[resourceId('Microsoft.Storage/storageAccounts', parameters('storageAccount'))]\", \"uri\": \"[parameters('vhdBlobURL')]\" } } } } } ] } ] } ] }", "az deployment group create -g USD{RESOURCE_GROUP} --template-file \"<installation_directory>/03_infra.json\" --parameters privateDNSZoneName=\"USD{CLUSTER_NAME}.USD{BASE_DOMAIN}\" \\ 1 --parameters baseName=\"USD{INFRA_ID}\" 2", "export PUBLIC_IP=`az network public-ip list -g USD{RESOURCE_GROUP} --query \"[?name=='USD{INFRA_ID}-master-pip'] | [0].ipAddress\" -o tsv`", "az network dns record-set a add-record -g USD{BASE_DOMAIN_RESOURCE_GROUP} -z USD{CLUSTER_NAME}.USD{BASE_DOMAIN} -n api -a USD{PUBLIC_IP} --ttl 60", "az network dns record-set a add-record -g USD{BASE_DOMAIN_RESOURCE_GROUP} -z USD{BASE_DOMAIN} -n api.USD{CLUSTER_NAME} -a USD{PUBLIC_IP} --ttl 60", "{ \"USDschema\" : \"https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#\", \"contentVersion\" : \"1.0.0.0\", \"parameters\" : { \"baseName\" : { \"type\" : \"string\", \"minLength\" : 1, \"metadata\" : { \"description\" : \"Base name to be used in resource names (usually the cluster's Infra ID)\" } }, \"vnetBaseName\": { \"type\": \"string\", \"defaultValue\": \"\", \"metadata\" : { \"description\" : \"The specific customer vnet's base name (optional)\" } }, \"privateDNSZoneName\" : { \"type\" : \"string\", \"metadata\" : { \"description\" : \"Name of the private DNS zone\" } } }, \"variables\" : { \"location\" : \"[resourceGroup().location]\", \"virtualNetworkName\" : \"[concat(if(not(empty(parameters('vnetBaseName'))), parameters('vnetBaseName'), parameters('baseName')), '-vnet')]\", \"virtualNetworkID\" : \"[resourceId('Microsoft.Network/virtualNetworks', variables('virtualNetworkName'))]\", \"masterSubnetName\" : \"[concat(if(not(empty(parameters('vnetBaseName'))), parameters('vnetBaseName'), parameters('baseName')), '-master-subnet')]\", \"masterSubnetRef\" : \"[concat(variables('virtualNetworkID'), '/subnets/', variables('masterSubnetName'))]\", \"masterPublicIpAddressName\" : \"[concat(parameters('baseName'), '-master-pip')]\", \"masterPublicIpAddressID\" : \"[resourceId('Microsoft.Network/publicIPAddresses', variables('masterPublicIpAddressName'))]\", \"masterLoadBalancerName\" : \"[parameters('baseName')]\", \"masterLoadBalancerID\" : \"[resourceId('Microsoft.Network/loadBalancers', 
variables('masterLoadBalancerName'))]\", \"internalLoadBalancerName\" : \"[concat(parameters('baseName'), '-internal-lb')]\", \"internalLoadBalancerID\" : \"[resourceId('Microsoft.Network/loadBalancers', variables('internalLoadBalancerName'))]\", \"skuName\": \"Standard\" }, \"resources\" : [ { \"apiVersion\" : \"2018-12-01\", \"type\" : \"Microsoft.Network/publicIPAddresses\", \"name\" : \"[variables('masterPublicIpAddressName')]\", \"location\" : \"[variables('location')]\", \"sku\": { \"name\": \"[variables('skuName')]\" }, \"properties\" : { \"publicIPAllocationMethod\" : \"Static\", \"dnsSettings\" : { \"domainNameLabel\" : \"[variables('masterPublicIpAddressName')]\" } } }, { \"apiVersion\" : \"2018-12-01\", \"type\" : \"Microsoft.Network/loadBalancers\", \"name\" : \"[variables('masterLoadBalancerName')]\", \"location\" : \"[variables('location')]\", \"sku\": { \"name\": \"[variables('skuName')]\" }, \"dependsOn\" : [ \"[concat('Microsoft.Network/publicIPAddresses/', variables('masterPublicIpAddressName'))]\" ], \"properties\" : { \"frontendIPConfigurations\" : [ { \"name\" : \"public-lb-ip-v4\", \"properties\" : { \"publicIPAddress\" : { \"id\" : \"[variables('masterPublicIpAddressID')]\" } } } ], \"backendAddressPools\" : [ { \"name\" : \"[variables('masterLoadBalancerName')]\" } ], \"loadBalancingRules\" : [ { \"name\" : \"api-internal\", \"properties\" : { \"frontendIPConfiguration\" : { \"id\" :\"[concat(variables('masterLoadBalancerID'), '/frontendIPConfigurations/public-lb-ip-v4')]\" }, \"backendAddressPool\" : { \"id\" : \"[concat(variables('masterLoadBalancerID'), '/backendAddressPools/', variables('masterLoadBalancerName'))]\" }, \"protocol\" : \"Tcp\", \"loadDistribution\" : \"Default\", \"idleTimeoutInMinutes\" : 30, \"frontendPort\" : 6443, \"backendPort\" : 6443, \"probe\" : { \"id\" : \"[concat(variables('masterLoadBalancerID'), '/probes/api-internal-probe')]\" } } } ], \"probes\" : [ { \"name\" : \"api-internal-probe\", \"properties\" : { \"protocol\" : \"Https\", \"port\" : 6443, \"requestPath\": \"/readyz\", \"intervalInSeconds\" : 10, \"numberOfProbes\" : 3 } } ] } }, { \"apiVersion\" : \"2018-12-01\", \"type\" : \"Microsoft.Network/loadBalancers\", \"name\" : \"[variables('internalLoadBalancerName')]\", \"location\" : \"[variables('location')]\", \"sku\": { \"name\": \"[variables('skuName')]\" }, \"properties\" : { \"frontendIPConfigurations\" : [ { \"name\" : \"internal-lb-ip\", \"properties\" : { \"privateIPAllocationMethod\" : \"Dynamic\", \"subnet\" : { \"id\" : \"[variables('masterSubnetRef')]\" }, \"privateIPAddressVersion\" : \"IPv4\" } } ], \"backendAddressPools\" : [ { \"name\" : \"internal-lb-backend\" } ], \"loadBalancingRules\" : [ { \"name\" : \"api-internal\", \"properties\" : { \"frontendIPConfiguration\" : { \"id\" : \"[concat(variables('internalLoadBalancerID'), '/frontendIPConfigurations/internal-lb-ip')]\" }, \"frontendPort\" : 6443, \"backendPort\" : 6443, \"enableFloatingIP\" : false, \"idleTimeoutInMinutes\" : 30, \"protocol\" : \"Tcp\", \"enableTcpReset\" : false, \"loadDistribution\" : \"Default\", \"backendAddressPool\" : { \"id\" : \"[concat(variables('internalLoadBalancerID'), '/backendAddressPools/internal-lb-backend')]\" }, \"probe\" : { \"id\" : \"[concat(variables('internalLoadBalancerID'), '/probes/api-internal-probe')]\" } } }, { \"name\" : \"sint\", \"properties\" : { \"frontendIPConfiguration\" : { \"id\" : \"[concat(variables('internalLoadBalancerID'), '/frontendIPConfigurations/internal-lb-ip')]\" }, \"frontendPort\" : 22623, 
\"backendPort\" : 22623, \"enableFloatingIP\" : false, \"idleTimeoutInMinutes\" : 30, \"protocol\" : \"Tcp\", \"enableTcpReset\" : false, \"loadDistribution\" : \"Default\", \"backendAddressPool\" : { \"id\" : \"[concat(variables('internalLoadBalancerID'), '/backendAddressPools/internal-lb-backend')]\" }, \"probe\" : { \"id\" : \"[concat(variables('internalLoadBalancerID'), '/probes/sint-probe')]\" } } } ], \"probes\" : [ { \"name\" : \"api-internal-probe\", \"properties\" : { \"protocol\" : \"Https\", \"port\" : 6443, \"requestPath\": \"/readyz\", \"intervalInSeconds\" : 10, \"numberOfProbes\" : 3 } }, { \"name\" : \"sint-probe\", \"properties\" : { \"protocol\" : \"Https\", \"port\" : 22623, \"requestPath\": \"/healthz\", \"intervalInSeconds\" : 10, \"numberOfProbes\" : 3 } } ] } }, { \"apiVersion\": \"2018-09-01\", \"type\": \"Microsoft.Network/privateDnsZones/A\", \"name\": \"[concat(parameters('privateDNSZoneName'), '/api')]\", \"location\" : \"[variables('location')]\", \"dependsOn\" : [ \"[concat('Microsoft.Network/loadBalancers/', variables('internalLoadBalancerName'))]\" ], \"properties\": { \"ttl\": 60, \"aRecords\": [ { \"ipv4Address\": \"[reference(variables('internalLoadBalancerName')).frontendIPConfigurations[0].properties.privateIPAddress]\" } ] } }, { \"apiVersion\": \"2018-09-01\", \"type\": \"Microsoft.Network/privateDnsZones/A\", \"name\": \"[concat(parameters('privateDNSZoneName'), '/api-int')]\", \"location\" : \"[variables('location')]\", \"dependsOn\" : [ \"[concat('Microsoft.Network/loadBalancers/', variables('internalLoadBalancerName'))]\" ], \"properties\": { \"ttl\": 60, \"aRecords\": [ { \"ipv4Address\": \"[reference(variables('internalLoadBalancerName')).frontendIPConfigurations[0].properties.privateIPAddress]\" } ] } } ] }", "bootstrap_url_expiry=`date -u -d \"10 hours\" '+%Y-%m-%dT%H:%MZ'`", "export BOOTSTRAP_URL=`az storage blob generate-sas -c 'files' -n 'bootstrap.ign' --https-only --full-uri --permissions r --expiry USDbootstrap_url_expiry --account-name USD{CLUSTER_NAME}sa --account-key USD{ACCOUNT_KEY} -o tsv`", "export BOOTSTRAP_IGNITION=`jq -rcnM --arg v \"3.2.0\" --arg url USD{BOOTSTRAP_URL} '{ignition:{version:USDv,config:{replace:{source:USDurl}}}}' | base64 | tr -d '\\n'`", "az deployment group create -g USD{RESOURCE_GROUP} --template-file \"<installation_directory>/04_bootstrap.json\" --parameters bootstrapIgnition=\"USD{BOOTSTRAP_IGNITION}\" \\ 1 --parameters baseName=\"USD{INFRA_ID}\" \\ 2 --parameter bootstrapVMSize=\"Standard_D4s_v3\" 3", "{ \"USDschema\" : \"https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#\", \"contentVersion\" : \"1.0.0.0\", \"parameters\" : { \"baseName\" : { \"type\" : \"string\", \"minLength\" : 1, \"metadata\" : { \"description\" : \"Base name to be used in resource names (usually the cluster's Infra ID)\" } }, \"vnetBaseName\": { \"type\": \"string\", \"defaultValue\": \"\", \"metadata\" : { \"description\" : \"The specific customer vnet's base name (optional)\" } }, \"bootstrapIgnition\" : { \"type\" : \"string\", \"minLength\" : 1, \"metadata\" : { \"description\" : \"Bootstrap ignition content for the bootstrap cluster\" } }, \"sshKeyData\" : { \"type\" : \"securestring\", \"defaultValue\" : \"Unused\", \"metadata\" : { \"description\" : \"Unused\" } }, \"bootstrapVMSize\" : { \"type\" : \"string\", \"defaultValue\" : \"Standard_D4s_v3\", \"metadata\" : { \"description\" : \"The size of the Bootstrap Virtual Machine\" } }, \"hyperVGen\": { \"type\": \"string\", \"metadata\": { 
\"description\": \"VM generation image to use\" }, \"defaultValue\": \"V2\", \"allowedValues\": [ \"V1\", \"V2\" ] } }, \"variables\" : { \"location\" : \"[resourceGroup().location]\", \"virtualNetworkName\" : \"[concat(if(not(empty(parameters('vnetBaseName'))), parameters('vnetBaseName'), parameters('baseName')), '-vnet')]\", \"virtualNetworkID\" : \"[resourceId('Microsoft.Network/virtualNetworks', variables('virtualNetworkName'))]\", \"masterSubnetName\" : \"[concat(if(not(empty(parameters('vnetBaseName'))), parameters('vnetBaseName'), parameters('baseName')), '-master-subnet')]\", \"masterSubnetRef\" : \"[concat(variables('virtualNetworkID'), '/subnets/', variables('masterSubnetName'))]\", \"masterLoadBalancerName\" : \"[parameters('baseName')]\", \"internalLoadBalancerName\" : \"[concat(parameters('baseName'), '-internal-lb')]\", \"sshKeyPath\" : \"/home/core/.ssh/authorized_keys\", \"identityName\" : \"[concat(parameters('baseName'), '-identity')]\", \"vmName\" : \"[concat(parameters('baseName'), '-bootstrap')]\", \"nicName\" : \"[concat(variables('vmName'), '-nic')]\", \"galleryName\": \"[concat('gallery_', replace(parameters('baseName'), '-', '_'))]\", \"imageName\" : \"[concat(parameters('baseName'), if(equals(parameters('hyperVGen'), 'V2'), '-gen2', ''))]\", \"clusterNsgName\" : \"[concat(if(not(empty(parameters('vnetBaseName'))), parameters('vnetBaseName'), parameters('baseName')), '-nsg')]\", \"sshPublicIpAddressName\" : \"[concat(variables('vmName'), '-ssh-pip')]\" }, \"resources\" : [ { \"apiVersion\" : \"2018-12-01\", \"type\" : \"Microsoft.Network/publicIPAddresses\", \"name\" : \"[variables('sshPublicIpAddressName')]\", \"location\" : \"[variables('location')]\", \"sku\": { \"name\": \"Standard\" }, \"properties\" : { \"publicIPAllocationMethod\" : \"Static\", \"dnsSettings\" : { \"domainNameLabel\" : \"[variables('sshPublicIpAddressName')]\" } } }, { \"apiVersion\" : \"2018-06-01\", \"type\" : \"Microsoft.Network/networkInterfaces\", \"name\" : \"[variables('nicName')]\", \"location\" : \"[variables('location')]\", \"dependsOn\" : [ \"[resourceId('Microsoft.Network/publicIPAddresses', variables('sshPublicIpAddressName'))]\" ], \"properties\" : { \"ipConfigurations\" : [ { \"name\" : \"pipConfig\", \"properties\" : { \"privateIPAllocationMethod\" : \"Dynamic\", \"publicIPAddress\": { \"id\": \"[resourceId('Microsoft.Network/publicIPAddresses', variables('sshPublicIpAddressName'))]\" }, \"subnet\" : { \"id\" : \"[variables('masterSubnetRef')]\" }, \"loadBalancerBackendAddressPools\" : [ { \"id\" : \"[concat('/subscriptions/', subscription().subscriptionId, '/resourceGroups/', resourceGroup().name, '/providers/Microsoft.Network/loadBalancers/', variables('masterLoadBalancerName'), '/backendAddressPools/', variables('masterLoadBalancerName'))]\" }, { \"id\" : \"[concat('/subscriptions/', subscription().subscriptionId, '/resourceGroups/', resourceGroup().name, '/providers/Microsoft.Network/loadBalancers/', variables('internalLoadBalancerName'), '/backendAddressPools/internal-lb-backend')]\" } ] } } ] } }, { \"apiVersion\" : \"2018-06-01\", \"type\" : \"Microsoft.Compute/virtualMachines\", \"name\" : \"[variables('vmName')]\", \"location\" : \"[variables('location')]\", \"identity\" : { \"type\" : \"userAssigned\", \"userAssignedIdentities\" : { \"[resourceID('Microsoft.ManagedIdentity/userAssignedIdentities/', variables('identityName'))]\" : {} } }, \"dependsOn\" : [ \"[concat('Microsoft.Network/networkInterfaces/', variables('nicName'))]\" ], \"properties\" : { 
\"hardwareProfile\" : { \"vmSize\" : \"[parameters('bootstrapVMSize')]\" }, \"osProfile\" : { \"computerName\" : \"[variables('vmName')]\", \"adminUsername\" : \"core\", \"adminPassword\" : \"NotActuallyApplied!\", \"customData\" : \"[parameters('bootstrapIgnition')]\", \"linuxConfiguration\" : { \"disablePasswordAuthentication\" : false } }, \"storageProfile\" : { \"imageReference\": { \"id\": \"[resourceId('Microsoft.Compute/galleries/images', variables('galleryName'), variables('imageName'))]\" }, \"osDisk\" : { \"name\": \"[concat(variables('vmName'),'_OSDisk')]\", \"osType\" : \"Linux\", \"createOption\" : \"FromImage\", \"managedDisk\": { \"storageAccountType\": \"Premium_LRS\" }, \"diskSizeGB\" : 100 } }, \"networkProfile\" : { \"networkInterfaces\" : [ { \"id\" : \"[resourceId('Microsoft.Network/networkInterfaces', variables('nicName'))]\" } ] } } }, { \"apiVersion\" : \"2018-06-01\", \"type\": \"Microsoft.Network/networkSecurityGroups/securityRules\", \"name\" : \"[concat(variables('clusterNsgName'), '/bootstrap_ssh_in')]\", \"location\" : \"[variables('location')]\", \"dependsOn\" : [ \"[resourceId('Microsoft.Compute/virtualMachines', variables('vmName'))]\" ], \"properties\": { \"protocol\" : \"Tcp\", \"sourcePortRange\" : \"*\", \"destinationPortRange\" : \"22\", \"sourceAddressPrefix\" : \"*\", \"destinationAddressPrefix\" : \"*\", \"access\" : \"Allow\", \"priority\" : 100, \"direction\" : \"Inbound\" } } ] }", "export MASTER_IGNITION=`cat <installation_directory>/master.ign | base64 | tr -d '\\n'`", "az deployment group create -g USD{RESOURCE_GROUP} --template-file \"<installation_directory>/05_masters.json\" --parameters masterIgnition=\"USD{MASTER_IGNITION}\" \\ 1 --parameters baseName=\"USD{INFRA_ID}\" \\ 2 --parameters masterVMSize=\"Standard_D8s_v3\" 3", "{ \"USDschema\" : \"https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#\", \"contentVersion\" : \"1.0.0.0\", \"parameters\" : { \"baseName\" : { \"type\" : \"string\", \"minLength\" : 1, \"metadata\" : { \"description\" : \"Base name to be used in resource names (usually the cluster's Infra ID)\" } }, \"vnetBaseName\": { \"type\": \"string\", \"defaultValue\": \"\", \"metadata\" : { \"description\" : \"The specific customer vnet's base name (optional)\" } }, \"masterIgnition\" : { \"type\" : \"string\", \"metadata\" : { \"description\" : \"Ignition content for the master nodes\" } }, \"numberOfMasters\" : { \"type\" : \"int\", \"defaultValue\" : 3, \"minValue\" : 2, \"maxValue\" : 30, \"metadata\" : { \"description\" : \"Number of OpenShift masters to deploy\" } }, \"sshKeyData\" : { \"type\" : \"securestring\", \"defaultValue\" : \"Unused\", \"metadata\" : { \"description\" : \"Unused\" } }, \"privateDNSZoneName\" : { \"type\" : \"string\", \"defaultValue\" : \"\", \"metadata\" : { \"description\" : \"unused\" } }, \"masterVMSize\" : { \"type\" : \"string\", \"defaultValue\" : \"Standard_D8s_v3\", \"metadata\" : { \"description\" : \"The size of the Master Virtual Machines\" } }, \"diskSizeGB\" : { \"type\" : \"int\", \"defaultValue\" : 1024, \"metadata\" : { \"description\" : \"Size of the Master VM OS disk, in GB\" } }, \"hyperVGen\": { \"type\": \"string\", \"metadata\": { \"description\": \"VM generation image to use\" }, \"defaultValue\": \"V2\", \"allowedValues\": [ \"V1\", \"V2\" ] } }, \"variables\" : { \"location\" : \"[resourceGroup().location]\", \"virtualNetworkName\" : \"[concat(if(not(empty(parameters('vnetBaseName'))), parameters('vnetBaseName'), parameters('baseName')), 
'-vnet')]\", \"virtualNetworkID\" : \"[resourceId('Microsoft.Network/virtualNetworks', variables('virtualNetworkName'))]\", \"masterSubnetName\" : \"[concat(if(not(empty(parameters('vnetBaseName'))), parameters('vnetBaseName'), parameters('baseName')), '-master-subnet')]\", \"masterSubnetRef\" : \"[concat(variables('virtualNetworkID'), '/subnets/', variables('masterSubnetName'))]\", \"masterLoadBalancerName\" : \"[parameters('baseName')]\", \"internalLoadBalancerName\" : \"[concat(parameters('baseName'), '-internal-lb')]\", \"sshKeyPath\" : \"/home/core/.ssh/authorized_keys\", \"identityName\" : \"[concat(parameters('baseName'), '-identity')]\", \"galleryName\": \"[concat('gallery_', replace(parameters('baseName'), '-', '_'))]\", \"imageName\" : \"[concat(parameters('baseName'), if(equals(parameters('hyperVGen'), 'V2'), '-gen2', ''))]\", \"copy\" : [ { \"name\" : \"vmNames\", \"count\" : \"[parameters('numberOfMasters')]\", \"input\" : \"[concat(parameters('baseName'), '-master-', copyIndex('vmNames'))]\" } ] }, \"resources\" : [ { \"apiVersion\" : \"2018-06-01\", \"type\" : \"Microsoft.Network/networkInterfaces\", \"copy\" : { \"name\" : \"nicCopy\", \"count\" : \"[length(variables('vmNames'))]\" }, \"name\" : \"[concat(variables('vmNames')[copyIndex()], '-nic')]\", \"location\" : \"[variables('location')]\", \"properties\" : { \"ipConfigurations\" : [ { \"name\" : \"pipConfig\", \"properties\" : { \"privateIPAllocationMethod\" : \"Dynamic\", \"subnet\" : { \"id\" : \"[variables('masterSubnetRef')]\" }, \"loadBalancerBackendAddressPools\" : [ { \"id\" : \"[concat('/subscriptions/', subscription().subscriptionId, '/resourceGroups/', resourceGroup().name, '/providers/Microsoft.Network/loadBalancers/', variables('masterLoadBalancerName'), '/backendAddressPools/', variables('masterLoadBalancerName'))]\" }, { \"id\" : \"[concat('/subscriptions/', subscription().subscriptionId, '/resourceGroups/', resourceGroup().name, '/providers/Microsoft.Network/loadBalancers/', variables('internalLoadBalancerName'), '/backendAddressPools/internal-lb-backend')]\" } ] } } ] } }, { \"apiVersion\" : \"2018-06-01\", \"type\" : \"Microsoft.Compute/virtualMachines\", \"copy\" : { \"name\" : \"vmCopy\", \"count\" : \"[length(variables('vmNames'))]\" }, \"name\" : \"[variables('vmNames')[copyIndex()]]\", \"location\" : \"[variables('location')]\", \"identity\" : { \"type\" : \"userAssigned\", \"userAssignedIdentities\" : { \"[resourceID('Microsoft.ManagedIdentity/userAssignedIdentities/', variables('identityName'))]\" : {} } }, \"dependsOn\" : [ \"[concat('Microsoft.Network/networkInterfaces/', concat(variables('vmNames')[copyIndex()], '-nic'))]\" ], \"properties\" : { \"hardwareProfile\" : { \"vmSize\" : \"[parameters('masterVMSize')]\" }, \"osProfile\" : { \"computerName\" : \"[variables('vmNames')[copyIndex()]]\", \"adminUsername\" : \"core\", \"adminPassword\" : \"NotActuallyApplied!\", \"customData\" : \"[parameters('masterIgnition')]\", \"linuxConfiguration\" : { \"disablePasswordAuthentication\" : false } }, \"storageProfile\" : { \"imageReference\": { \"id\": \"[resourceId('Microsoft.Compute/galleries/images', variables('galleryName'), variables('imageName'))]\" }, \"osDisk\" : { \"name\": \"[concat(variables('vmNames')[copyIndex()], '_OSDisk')]\", \"osType\" : \"Linux\", \"createOption\" : \"FromImage\", \"caching\": \"ReadOnly\", \"writeAcceleratorEnabled\": false, \"managedDisk\": { \"storageAccountType\": \"Premium_LRS\" }, \"diskSizeGB\" : \"[parameters('diskSizeGB')]\" } }, \"networkProfile\" : { 
\"networkInterfaces\" : [ { \"id\" : \"[resourceId('Microsoft.Network/networkInterfaces', concat(variables('vmNames')[copyIndex()], '-nic'))]\", \"properties\": { \"primary\": false } } ] } } } ] }", "./openshift-install wait-for bootstrap-complete --dir <installation_directory> \\ 1 --log-level info 2", "az network nsg rule delete -g USD{RESOURCE_GROUP} --nsg-name USD{INFRA_ID}-nsg --name bootstrap_ssh_in az vm stop -g USD{RESOURCE_GROUP} --name USD{INFRA_ID}-bootstrap az vm deallocate -g USD{RESOURCE_GROUP} --name USD{INFRA_ID}-bootstrap az vm delete -g USD{RESOURCE_GROUP} --name USD{INFRA_ID}-bootstrap --yes az disk delete -g USD{RESOURCE_GROUP} --name USD{INFRA_ID}-bootstrap_OSDisk --no-wait --yes az network nic delete -g USD{RESOURCE_GROUP} --name USD{INFRA_ID}-bootstrap-nic --no-wait az storage blob delete --account-key USD{ACCOUNT_KEY} --account-name USD{CLUSTER_NAME}sa --container-name files --name bootstrap.ign az network public-ip delete -g USD{RESOURCE_GROUP} --name USD{INFRA_ID}-bootstrap-ssh-pip", "export WORKER_IGNITION=`cat <installation_directory>/worker.ign | base64 | tr -d '\\n'`", "az deployment group create -g USD{RESOURCE_GROUP} --template-file \"<installation_directory>/06_workers.json\" --parameters workerIgnition=\"USD{WORKER_IGNITION}\" \\ 1 --parameters baseName=\"USD{INFRA_ID}\" \\ 2 --parameters nodeVMSize=\"Standard_D4s_v3\" 3", "{ \"USDschema\" : \"https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#\", \"contentVersion\" : \"1.0.0.0\", \"parameters\" : { \"baseName\" : { \"type\" : \"string\", \"minLength\" : 1, \"metadata\" : { \"description\" : \"Base name to be used in resource names (usually the cluster's Infra ID)\" } }, \"vnetBaseName\": { \"type\": \"string\", \"defaultValue\": \"\", \"metadata\" : { \"description\" : \"The specific customer vnet's base name (optional)\" } }, \"workerIgnition\" : { \"type\" : \"string\", \"metadata\" : { \"description\" : \"Ignition content for the worker nodes\" } }, \"numberOfNodes\" : { \"type\" : \"int\", \"defaultValue\" : 3, \"minValue\" : 2, \"maxValue\" : 30, \"metadata\" : { \"description\" : \"Number of OpenShift compute nodes to deploy\" } }, \"sshKeyData\" : { \"type\" : \"securestring\", \"defaultValue\" : \"Unused\", \"metadata\" : { \"description\" : \"Unused\" } }, \"nodeVMSize\" : { \"type\" : \"string\", \"defaultValue\" : \"Standard_D4s_v3\", \"metadata\" : { \"description\" : \"The size of the each Node Virtual Machine\" } }, \"hyperVGen\": { \"type\": \"string\", \"metadata\": { \"description\": \"VM generation image to use\" }, \"defaultValue\": \"V2\", \"allowedValues\": [ \"V1\", \"V2\" ] } }, \"variables\" : { \"location\" : \"[resourceGroup().location]\", \"virtualNetworkName\" : \"[concat(if(not(empty(parameters('vnetBaseName'))), parameters('vnetBaseName'), parameters('baseName')), '-vnet')]\", \"virtualNetworkID\" : \"[resourceId('Microsoft.Network/virtualNetworks', variables('virtualNetworkName'))]\", \"nodeSubnetName\" : \"[concat(if(not(empty(parameters('vnetBaseName'))), parameters('vnetBaseName'), parameters('baseName')), '-worker-subnet')]\", \"nodeSubnetRef\" : \"[concat(variables('virtualNetworkID'), '/subnets/', variables('nodeSubnetName'))]\", \"infraLoadBalancerName\" : \"[parameters('baseName')]\", \"sshKeyPath\" : \"/home/capi/.ssh/authorized_keys\", \"identityName\" : \"[concat(parameters('baseName'), '-identity')]\", \"galleryName\": \"[concat('gallery_', replace(parameters('baseName'), '-', '_'))]\", \"imageName\" : 
\"[concat(parameters('baseName'), if(equals(parameters('hyperVGen'), 'V2'), '-gen2', ''))]\", \"copy\" : [ { \"name\" : \"vmNames\", \"count\" : \"[parameters('numberOfNodes')]\", \"input\" : \"[concat(parameters('baseName'), '-worker-', variables('location'), '-', copyIndex('vmNames', 1))]\" } ] }, \"resources\" : [ { \"apiVersion\" : \"2019-05-01\", \"name\" : \"[concat('node', copyIndex())]\", \"type\" : \"Microsoft.Resources/deployments\", \"copy\" : { \"name\" : \"nodeCopy\", \"count\" : \"[length(variables('vmNames'))]\" }, \"properties\" : { \"mode\" : \"Incremental\", \"template\" : { \"USDschema\" : \"http://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#\", \"contentVersion\" : \"1.0.0.0\", \"resources\" : [ { \"apiVersion\" : \"2018-06-01\", \"type\" : \"Microsoft.Network/networkInterfaces\", \"name\" : \"[concat(variables('vmNames')[copyIndex()], '-nic')]\", \"location\" : \"[variables('location')]\", \"properties\" : { \"ipConfigurations\" : [ { \"name\" : \"pipConfig\", \"properties\" : { \"privateIPAllocationMethod\" : \"Dynamic\", \"subnet\" : { \"id\" : \"[variables('nodeSubnetRef')]\" } } } ] } }, { \"apiVersion\" : \"2018-06-01\", \"type\" : \"Microsoft.Compute/virtualMachines\", \"name\" : \"[variables('vmNames')[copyIndex()]]\", \"location\" : \"[variables('location')]\", \"tags\" : { \"kubernetes.io-cluster-ffranzupi\": \"owned\" }, \"identity\" : { \"type\" : \"userAssigned\", \"userAssignedIdentities\" : { \"[resourceID('Microsoft.ManagedIdentity/userAssignedIdentities/', variables('identityName'))]\" : {} } }, \"dependsOn\" : [ \"[concat('Microsoft.Network/networkInterfaces/', concat(variables('vmNames')[copyIndex()], '-nic'))]\" ], \"properties\" : { \"hardwareProfile\" : { \"vmSize\" : \"[parameters('nodeVMSize')]\" }, \"osProfile\" : { \"computerName\" : \"[variables('vmNames')[copyIndex()]]\", \"adminUsername\" : \"capi\", \"adminPassword\" : \"NotActuallyApplied!\", \"customData\" : \"[parameters('workerIgnition')]\", \"linuxConfiguration\" : { \"disablePasswordAuthentication\" : false } }, \"storageProfile\" : { \"imageReference\": { \"id\": \"[resourceId('Microsoft.Compute/galleries/images', variables('galleryName'), variables('imageName'))]\" }, \"osDisk\" : { \"name\": \"[concat(variables('vmNames')[copyIndex()],'_OSDisk')]\", \"osType\" : \"Linux\", \"createOption\" : \"FromImage\", \"managedDisk\": { \"storageAccountType\": \"Premium_LRS\" }, \"diskSizeGB\": 128 } }, \"networkProfile\" : { \"networkInterfaces\" : [ { \"id\" : \"[resourceId('Microsoft.Network/networkInterfaces', concat(variables('vmNames')[copyIndex()], '-nic'))]\", \"properties\": { \"primary\": true } } ] } } } ] } } } ] }", "tar xvf <file>", "echo USDPATH", "oc <command>", "C:\\> path", "C:\\> oc <command>", "echo USDPATH", "oc <command>", "export KUBECONFIG=<installation_directory>/auth/kubeconfig 1", "oc whoami", "system:admin", "oc get nodes", "NAME STATUS ROLES AGE VERSION master-0 Ready master 63m v1.30.3 master-1 Ready master 63m v1.30.3 master-2 Ready master 64m v1.30.3", "oc get csr", "NAME AGE REQUESTOR CONDITION csr-8b2br 15m system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Pending csr-8vnps 15m system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Pending", "oc adm certificate approve <csr_name> 1", "oc get csr -o go-template='{{range .items}}{{if not .status}}{{.metadata.name}}{{\"\\n\"}}{{end}}{{end}}' | xargs --no-run-if-empty oc adm certificate approve", "oc get csr", "NAME AGE REQUESTOR CONDITION 
csr-bfd72 5m26s system:node:ip-10-0-50-126.us-east-2.compute.internal Pending csr-c57lv 5m26s system:node:ip-10-0-95-157.us-east-2.compute.internal Pending", "oc adm certificate approve <csr_name> 1", "oc get csr -o go-template='{{range .items}}{{if not .status}}{{.metadata.name}}{{\"\\n\"}}{{end}}{{end}}' | xargs oc adm certificate approve", "oc get nodes", "NAME STATUS ROLES AGE VERSION master-0 Ready master 73m v1.30.3 master-1 Ready master 73m v1.30.3 master-2 Ready master 74m v1.30.3 worker-0 Ready worker 11m v1.30.3 worker-1 Ready worker 11m v1.30.3", "oc -n openshift-ingress get service router-default", "NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE router-default LoadBalancer 172.30.20.10 35.130.120.110 80:32288/TCP,443:31215/TCP 20", "export PUBLIC_IP_ROUTER=`oc -n openshift-ingress get service router-default --no-headers | awk '{print USD4}'`", "az network dns record-set a add-record -g USD{BASE_DOMAIN_RESOURCE_GROUP} -z USD{CLUSTER_NAME}.USD{BASE_DOMAIN} -n *.apps -a USD{PUBLIC_IP_ROUTER} --ttl 300", "az network dns record-set a add-record -g USD{BASE_DOMAIN_RESOURCE_GROUP} -z USD{BASE_DOMAIN} -n *.apps.USD{CLUSTER_NAME} -a USD{PUBLIC_IP_ROUTER} --ttl 300", "az network private-dns record-set a create -g USD{RESOURCE_GROUP} -z USD{CLUSTER_NAME}.USD{BASE_DOMAIN} -n *.apps --ttl 300", "az network private-dns record-set a add-record -g USD{RESOURCE_GROUP} -z USD{CLUSTER_NAME}.USD{BASE_DOMAIN} -n *.apps -a USD{PUBLIC_IP_ROUTER}", "oc get --all-namespaces -o jsonpath='{range .items[*]}{range .status.ingress[*]}{.host}{\"\\n\"}{end}{end}' routes", "oauth-openshift.apps.cluster.basedomain.com console-openshift-console.apps.cluster.basedomain.com downloads-openshift-console.apps.cluster.basedomain.com alertmanager-main-openshift-monitoring.apps.cluster.basedomain.com prometheus-k8s-openshift-monitoring.apps.cluster.basedomain.com", "./openshift-install --dir <installation_directory> wait-for install-complete 1", "INFO Waiting up to 30m0s for the cluster to initialize", "apiVersion: v1 baseDomain: example.com compute: - name: worker platform: {} replicas: 0", "apiVersion: config.openshift.io/v1 kind: Scheduler metadata: creationTimestamp: null name: cluster spec: mastersSchedulable: true policy: name: \"\" status: {}", "./openshift-install destroy cluster --dir <installation_directory> --log-level info 1 2", "ccoctl azure delete --name=<name> \\ 1 --region=<azure_region> \\ 2 --subscription-id=<azure_subscription_id> \\ 3 --delete-oidc-resource-group", "apiVersion:", "baseDomain:", "metadata:", "metadata: name:", "platform:", "pullSecret:", "{ \"auths\":{ \"cloud.openshift.com\":{ \"auth\":\"b3Blb=\", \"email\":\"[email protected]\" }, \"quay.io\":{ \"auth\":\"b3Blb=\", \"email\":\"[email protected]\" } } }", "networking:", "networking: networkType:", "networking: clusterNetwork:", "networking: clusterNetwork: - cidr: 10.128.0.0/14 hostPrefix: 23", "networking: clusterNetwork: cidr:", "networking: clusterNetwork: hostPrefix:", "networking: serviceNetwork:", "networking: serviceNetwork: - 172.30.0.0/16", "networking: machineNetwork:", "networking: machineNetwork: - cidr: 10.0.0.0/16", "networking: machineNetwork: cidr:", "additionalTrustBundle:", "capabilities:", "capabilities: baselineCapabilitySet:", "capabilities: additionalEnabledCapabilities:", "cpuPartitioningMode:", "compute:", "compute: architecture:", "compute: hyperthreading:", "compute: name:", "compute: platform:", "compute: replicas:", "featureSet:", "controlPlane:", "controlPlane: architecture:", "controlPlane: 
hyperthreading:", "controlPlane: name:", "controlPlane: platform:", "controlPlane: replicas:", "credentialsMode:", "fips:", "imageContentSources:", "imageContentSources: source:", "imageContentSources: mirrors:", "publish:", "sshKey:", "compute: platform: azure: encryptionAtHost:", "compute: platform: azure: osDisk: diskSizeGB:", "compute: platform: azure: osDisk: diskType:", "compute: platform: azure: ultraSSDCapability:", "compute: platform: azure: osDisk: diskEncryptionSet: resourceGroup:", "compute: platform: azure: osDisk: diskEncryptionSet: name:", "compute: platform: azure: osDisk: diskEncryptionSet: subscriptionId:", "compute: platform: azure: osImage: publisher:", "compute: platform: azure: osImage: offer:", "compute: platform: azure: osImage: sku:", "compute: platform: azure: osImage: version:", "compute: platform: azure: vmNetworkingType:", "compute: platform: azure: type:", "compute: platform: azure: zones:", "compute: platform: azure: settings: securityType:", "compute: platform: azure: settings: confidentialVM: uefiSettings: secureBoot:", "compute: platform: azure: settings: confidentialVM: uefiSettings: virtualizedTrustedPlatformModule:", "compute: platform: azure: settings: trustedLaunch: uefiSettings: secureBoot:", "compute: platform: azure: settings: trustedLaunch: uefiSettings: virtualizedTrustedPlatformModule:", "compute: platform: azure: osDisk: securityProfile: securityEncryptionType:", "controlPlane: platform: azure: settings: securityType:", "controlPlane: platform: azure: settings: confidentialVM: uefiSettings: secureBoot:", "controlPlane: platform: azure: settings: confidentialVM: uefiSettings: virtualizedTrustedPlatformModule:", "controlPlane: platform: azure: settings: trustedLaunch: uefiSettings: secureBoot:", "controlPlane: platform: azure: settings: trustedLaunch: uefiSettings: virtualizedTrustedPlatformModule:", "controlPlane: platform: azure: osDisk: securityProfile: securityEncryptionType:", "controlPlane: platform: azure: type:", "controlPlane: platform: azure: zones:", "platform: azure: defaultMachinePlatform: settings: securityType:", "platform: azure: defaultMachinePlatform: settings: confidentialVM: uefiSettings: secureBoot:", "platform: azure: defaultMachinePlatform: settings: confidentialVM: uefiSettings: virtualizedTrustedPlatformModule:", "platform: azure: defaultMachinePlatform: settings: trustedLaunch: uefiSettings: secureBoot:", "platform: azure: defaultMachinePlatform: settings: trustedLaunch: uefiSettings: virtualizedTrustedPlatformModule:", "platform: azure: defaultMachinePlatform: osDisk: securityProfile: securityEncryptionType:", "platform: azure: defaultMachinePlatform: encryptionAtHost:", "platform: azure: defaultMachinePlatform: osDisk: diskEncryptionSet: name:", "platform: azure: defaultMachinePlatform: osDisk: diskEncryptionSet: resourceGroup:", "platform: azure: defaultMachinePlatform: osDisk: diskEncryptionSet: subscriptionId:", "platform: azure: defaultMachinePlatform: osDisk: diskSizeGB:", "platform: azure: defaultMachinePlatform: osDisk: diskType:", "platform: azure: defaultMachinePlatform: osImage: publisher:", "platform: azure: defaultMachinePlatform: osImage: offer:", "platform: azure: defaultMachinePlatform: osImage: sku:", "platform: azure: defaultMachinePlatform: osImage: version:", "platform: azure: defaultMachinePlatform: type:", "platform: azure: defaultMachinePlatform: zones:", "controlPlane: platform: azure: encryptionAtHost:", "controlPlane: platform: azure: osDisk: diskEncryptionSet: resourceGroup:", "controlPlane: 
platform: azure: osDisk: diskEncryptionSet: name:", "controlPlane: platform: azure: osDisk: diskEncryptionSet: subscriptionId:", "controlPlane: platform: azure: osDisk: diskSizeGB:", "controlPlane: platform: azure: osDisk: diskType:", "controlPlane: platform: azure: osImage: publisher:", "controlPlane: platform: azure: osImage: offer:", "controlPlane: platform: azure: osImage: sku:", "controlPlane: platform: azure: osImage: version:", "controlPlane: platform: azure: ultraSSDCapability:", "controlPlane: platform: azure: vmNetworkingType:", "platform: azure: baseDomainResourceGroupName:", "platform: azure: resourceGroupName:", "platform: azure: outboundType:", "platform: azure: region:", "platform: azure: zone:", "platform: azure: customerManagedKey: keyVault: name:", "platform: azure: customerManagedKey: keyVault: keyName:", "platform: azure: customerManagedKey: keyVault: resourceGroup:", "platform: azure: customerManagedKey: keyVault: subscriptionId:", "platform: azure: customerManagedKey: userAssignedIdentityKey:", "platform: azure: defaultMachinePlatform: ultraSSDCapability:", "platform: azure: networkResourceGroupName:", "platform: azure: virtualNetwork:", "platform: azure: controlPlaneSubnet:", "platform: azure: computeSubnet:", "platform: azure: cloudName:", "platform: azure: defaultMachinePlatform: vmNetworkingType:", "operatorPublishingStrategy: apiserver:", "operatorPublishingStrategy: ingress:" ]
https://docs.redhat.com/en/documentation/openshift_container_platform/4.17/html-single/installing_on_azure/index
Chapter 32. LDAP connection
Chapter 32. LDAP connection Business Central provides a dedicated UserGroupCallback implementation for LDAP servers with Red Hat Decision Manager to enable the user task service to retrieve information on users, groups, and roles directly from an LDAP service. You can configure the following LDAP UserGroupCallback implementation properties: Table 32.1. LDAP UserGroupCallback properties Property Description ldap.bind.user User name for connecting to the LDAP server. This property is optional; if it is not specified, the LDAP server must accept anonymous access. ldap.bind.pwd Password for connecting to the LDAP server. This property is optional; if it is not specified, the LDAP server must accept anonymous access. ldap.user.ctx Context in LDAP with user information. ldap.role.ctx Context in LDAP with group and role information. ldap.user.roles.ctx Context in LDAP with user group and role membership information. This property is optional; if it is not specified, the ldap.role.ctx property is used instead. ldap.user.filter Filter for searching user information. This property usually contains substitution keys {0} that are replaced with parameters. ldap.role.filter Filter for searching group and role information. This property usually contains substitution keys {0} that are replaced with parameters. ldap.user.roles.filter Filter for searching user group and role membership information. This property usually contains substitution keys {0} that are replaced with parameters. ldap.user.attr.id Attribute name of the user ID in LDAP. This property is optional; if it is not specified, the uid attribute is used instead. ldap.roles.attr.id Attribute name of the group and role ID in LDAP. This property is optional; if it is not specified, the cn attribute is used instead. ldap.user.id.dn Indicates whether the user ID is a DN; if true, the callback queries for the user DN before searching for roles. This property is optional and is false by default. java.naming.factory.initial Initial context factory class name; the default is com.sun.jndi.ldap.LdapCtxFactory . java.naming.security.authentication Authentication type; the possible values are none , simple , and strong . The default is simple . java.naming.security.protocol Security protocol to be used, for example, ssl . java.naming.provider.url LDAP URL (by default ldap://localhost:389 ; if the protocol is set to ssl , then ldap://localhost:636 ). 32.1. LDAP UserGroupCallback implementation You can use the LDAP UserGroupCallback implementation by configuring the respective LDAP properties in one of the following ways: Programmatically: Build a properties object with the respective LDAPUserGroupCallbackImpl properties and create LDAPUserGroupCallbackImpl using the same properties object as its parameter. For example: Declaratively: Create the jbpm.usergroup.callback.properties file in the root of your application or specify the file location as a system property. For example: -Djbpm.usergroup.callback.properties=FILE_LOCATION_ON_CLASSPATH Ensure that you register the LDAP callback when starting the user task server. For example: Additional resources Roles and users Red Hat Single Sign-On Server Administration Guide Defining LDAP login domain LDAP login module LDAPExtended login module AdvancedLDAP login module AdvancedAdLDAP login module LDAP connectivity options LDAPUsers login module
[ "import org.kie.api.PropertiesConfiguration; import org.kie.api.task.UserGroupCallback; Properties properties = new Properties(); properties.setProperty(LDAPUserGroupCallbackImpl.USER_CTX, \"ou=People,dc=my-domain,dc=com\"); properties.setProperty(LDAPUserGroupCallbackImpl.ROLE_CTX, \"ou=Roles,dc=my-domain,dc=com\"); properties.setProperty(LDAPUserGroupCallbackImpl.USER_ROLES_CTX, \"ou=Roles,dc=my-domain,dc=com\"); properties.setProperty(LDAPUserGroupCallbackImpl.USER_FILTER, \"(uid={0})\"); properties.setProperty(LDAPUserGroupCallbackImpl.ROLE_FILTER, \"(cn={0})\"); properties.setProperty(LDAPUserGroupCallbackImpl.USER_ROLES_FILTER, \"(member={0})\"); UserGroupCallback ldapUserGroupCallback = new LDAPUserGroupCallbackImpl(properties); UserGroupCallbackManager.getInstance().setCallback(ldapUserGroupCallback);", "#ldap.bind.user= #ldap.bind.pwd= ldap.user.ctx=ou\\=People,dc\\=my-domain,dc\\=com ldap.role.ctx=ou\\=Roles,dc\\=my-domain,dc\\=com ldap.user.roles.ctx=ou\\=Roles,dc\\=my-domain,dc\\=com ldap.user.filter=(uid\\={0}) ldap.role.filter=(cn\\={0}) ldap.user.roles.filter=(member\\={0}) #ldap.user.attr.id= #ldap.roles.attr.id=" ]
https://docs.redhat.com/en/documentation/red_hat_decision_manager/7.13/html/managing_red_hat_decision_manager_and_kie_server_settings/managing-business-central-ldap-connection-con
Chapter 53. Deprecated Functionality in Red Hat Enterprise Linux 7
Chapter 53. Deprecated Functionality in Red Hat Enterprise Linux 7 Deprecated packages related to Identity Management The following packages are deprecated and will not be included in a future major release of Red Hat Enterprise Linux: Deprecated Packages Proposed Replacement Package or Product authconfig authselect pam_pkcs11 sssd [a] pam_krb5 sssd [b] openldap-servers Depending on the use case, migrate to Identity Management included in Red Hat Enterprise Linux or to Red Hat Directory Server. [c] [a] System Security Services Daemon (SSSD) contains enhanced smart card functionality. [b] For details on migrating from pam_krb5 to sssd , see How to migrate from pam_krb5 to SSSD Knowledgebase article on the Red Hat Customer Portal. [c] Red Hat Directory Server requires a valid Directory Server subscription. Deprecated Insecure Algorithms and Protocols Algorithms that provide cryptographic hashes and encryption as well as cryptographic protocols have a lifetime after which they are considered either too risky to use or plain insecure. See the Enhancing the Security of the Operating System with Cryptography Changes in Red Hat Enterprise Linux 7.4 Knowledgebase article on the Red Hat Customer Portal for more information. Weak ciphers and algorithms are no longer used by default in OpenSSH With this update, the OpenSSH library removes several weak ciphers and algorithms from default configurations. However, backward compatibility is ensured in most cases. The following have been removed from the OpenSSH server and client: Host key algorithms: [email protected] [email protected] Ciphers: arcfour256 arcfour128 arcfour [email protected] MACs: hmac-md5 hmac-md5-96 [email protected] [email protected] hmac-ripemd160 [email protected] [email protected] hmac-sha1-96 [email protected] The following have been removed from the OpenSSH client: Ciphers: blowfish-cbc cast128-cbc 3des-cbc OpenSSH no longer uses the SHA-1-based key exchange algorithms in FIPS mode This update removes the SHA-1-based key exchange algorithms from the default list in FIPS mode. To enable those algorithms, use the following configuration snippet for the ~/.ssh/config and /etc/ssh/sshd_config files: The SSH-1 protocol has been removed from the OpenSSH server SSH-1 protocol support has been removed from the OpenSSH server. For more information, see the The server-side SSH-1 protocol removal from RHEL 7.4 Knowledgebase article. MD5, MD4, and SHA0 can no longer be used as signing algorithms in OpenSSL With this update, support for verification of MD5, MD4, and SHA0 signatures in certificates, Certificate Revocation Lists (CRL) and message signatures has been removed. Additionally, the default algorithm for generating digital signatures has been changed from SHA-1 to SHA-256. The verification of SHA-1 signatures is still enabled for legacy purposes. The system administrator can enable MD5, MD4, or SHA0 support by modifying the LegacySigningMDs option in the etc/pki/tls/legacy-settings policy configuration file, for example: To add more than one legacy algorithm, use a comma or any whitespace character except for a new line. See the README.legacy-settings file in the OpenSSL package for more information. You can also enable MD5 verification by setting the OPENSSL_ENABLE_MD5_VERIFY environment variable. OpenSSL clients no longer allow connections to servers with DH shorter than 1024 bits This update prevents OpenSSL clients from connecting to servers with Diffie-Hellman (DH) parameters shorter than 1024 bits. 
This ensures that clients using OpenSSL are not susceptible to vulnerabilities, such as Logjam. The system administrator can enable shorter DH parameter support by modifying the MinimumDHBits option in the /etc/pki/tls/legacy-settings , for example: This option can also be used to raise the minimum if required by the system administrator. SSL 2.0 support has been completely removed from OpenSSL The SSL protocol version 2.0, which is considered insecure for more than seven years, was deprecated by RFC 6176 in 2011. In Red Hat Enterprise Linux, support of SSL 2.0 was already disabled by default. With this update, SSL 2.0 support has been removed completely. The OpenSSL library API calls that use this protocol version now return an error message. EXPORT cipher suites in OpenSSL have been deprecated This change removes support for EXPORT cipher suites from the OpenSSL toolkit. Disabling these weak cipher suites ensures that clients using OpenSSL are not susceptible to vulnerabilities, such as FREAK. EXPORT cipher suites are no longer required in any TLS protocol configurations. GnuTLS clients no longer allow connections to servers with DH shorter than 1024 bits This change prevents GNU Transport Layer Security (GnuTLS) clients from connecting to servers with Diffie-Hellman (DH) parameters shorter than 1024 bits. This ensures that clients using GnuTLS are not susceptible to vulnerabilities, such as Logjam. In applications that accept a priority string from the user or configuration directly, this change can be reverted by appending the priority string %PROFILE_VERY_WEAK to the used priority string. NSS clients using TLS no longer allow connections to servers with DH shorter than 1024 bits This change prevents Network Security Services (NSS) clients from connecting to servers with Diffie-Hellman (DH) parameters shorter than 1024 bits. This ensures that clients using NSS are not susceptible to vulnerabilities, such as Logjam. The system administrator can enable shorter DH parameter support by modifying the /etc/pki/nss-legacy/nss-rhel7.config policy configuration file to: Note that an empty line is required at the end of the file. EXPORT cipher suites in NSS have been deprecated This change removes support for EXPORT cipher suites in the Network Security Services (NSS) library. Disabling these weak cipher suites protects against vulnerabilities, such as FREAK. EXPORT cipher suites are not required in any TLS protocol configuration. Legacy CA certificates removed from the ca-certificates package Previously, to allow older versions of the GnuTLS , OpenSSL , and glib-networking libraries to remain compatible with the Public Key Infrastructure (PKI), the ca-certificates package included a set of legacy CA certificates with 1024-bit RSA keys as trusted by default. Since Red Hat Enterprise Linux 7.4, updated versions of OpenSSL , GnuTLS , and glib-networking are available, which are able to correctly identify a replacement of root CA certificates. Trusting these legacy CA certificates is no longer required for public web PKI compatibility. The legacy configuration mechanism, which could previously be used to disable the legacy CA certificates, is no longer supported; the list of legacy CA certificates has been changed to empty. The ca-legacy tool is still available and it also keeps current configuration settings for potential future reuse. coolkey replaced with opensc The OpenSC library implements the PKCS#11 API and replaces the coolkey packages. 
In Red Hat Enterprise Linux 7, the CoolKey Applet functionality is also provided by the opensc package. The coolkey package will remain supported for the lifetime of Red Hat Enterprise Linux 7, but new hardware enablement will be provided through the opensc package. The inputname option of the rsyslog imudp module has been deprecated The inputname option of the imudp module for the rsyslog service has been deprecated. Use the name option instead. FedFS has been deprecated Federated File System (FedFS) has been deprecated because the upstream FedFS project is no longer being actively maintained. Red Hat recommends migrating FedFS installations to use autofs , which provides more flexible functionality. Btrfs has been deprecated The Btrfs file system has been in Technology Preview state since the initial release of Red Hat Enterprise Linux 6. Red Hat will not be moving Btrfs to a fully supported feature and it will be removed in a future major release of Red Hat Enterprise Linux. The Btrfs file system did receive numerous updates from the upstream in Red Hat Enterprise Linux 7.4 and will remain available in the Red Hat Enterprise Linux 7 series. However, this is the last planned update to this feature. tcp_wrappers deprecated The tcp_wrappers package, which provides a library and a small daemon program that can monitor and filter incoming requests for systat , finger , FTP , telnet , rlogin , rsh , exec , tftp , talk , sshd , and other network services, has been deprecated. nautilus-open-terminal replaced with gnome-terminal-nautilus Since Red Hat Enterprise Linux 7.3, the nautilus-open-terminal package has been deprecated and replaced with the gnome-terminal-nautilus package. This package provides a Nautilus extension that adds the Open in Terminal option to the right-click context menu in Nautilus. nautilus-open-terminal is replaced by gnome-terminal-nautilus during the system upgrade. sslwrap() removed from Python The sslwrap() function has been removed from Python 2.7 . After the 466 Python Enhancement Proposal was implemented, using this function resulted in a segmentation fault. The removal is consistent with upstream. Red Hat recommends using the ssl.SSLContext class and the ssl.SSLContext.wrap_socket() function instead. Most applications can simply use the ssl.create_default_context() function, which creates a context with secure default settings. The default context uses the system's default trust store, too. Symbols from libraries linked as dependencies no longer resolved by ld Previously, the ld linker resolved any symbols present in any linked library, even if some libraries were linked only implicitly as dependencies of other libraries. This allowed developers to use symbols from the implicitly linked libraries in application code and omit explicitly specifying these libraries for linking. For security reasons, ld has been changed to not resolve references to symbols in libraries linked implicitly as dependencies. As a result, linking with ld fails when application code attempts to use symbols from libraries not declared for linking and linked only implicitly as dependencies. To use symbols from libraries linked as dependencies, developers must explicitly link against these libraries as well. To restore the behavior of ld , use the -copy-dt-needed-entries command-line option. 
(BZ# 1292230 ) Windows guest virtual machine support limited As of Red Hat Enterprise Linux 7, Windows guest virtual machines are supported only under specific subscription programs, such as Advanced Mission Critical (AMC). libnetlink is deprecated The libnetlink library contained in the iproute-devel package has been deprecated. Use the libnl and libmnl libraries instead. S3 and S4 power management states for KVM have been deprecated Native KVM support for the S3 (suspend to RAM) and S4 (suspend to disk) power management states has been discontinued. This feature was previously available as a Technology Preview. The Certificate Server plug-in udnPwdDirAuth is discontinued The udnPwdDirAuth authentication plug-in for the Red Hat Certificate Server was removed in Red Hat Enterprise Linux 7.3. Profiles using the plug-in are no longer supported. Certificates created with a profile using the udnPwdDirAuth plug-in are still valid if they have been approved. Red Hat Access plug-in for IdM is discontinued The Red Hat Access plug-in for Identity Management (IdM) was removed in Red Hat Enterprise Linux 7.3. During the update, the redhat-access-plugin-ipa package is automatically uninstalled. Features previously provided by the plug-in, such as Knowledgebase access and support case engagement, are still available through the Red Hat Customer Portal. Red Hat recommends exploring alternatives, such as the redhat-support-tool tool. The Ipsilon identity provider service for federated single sign-on The ipsilon packages were introduced as Technology Preview in Red Hat Enterprise Linux 7.2. Ipsilon links authentication providers and applications or utilities to allow for single sign-on (SSO). Red Hat does not plan to upgrade Ipsilon from Technology Preview to a fully supported feature. The ipsilon packages will be removed from Red Hat Enterprise Linux in a future minor release. Red Hat has released Red Hat Single Sign-On as a web SSO solution based on the Keycloak community project. Red Hat Single Sign-On provides greater capabilities than Ipsilon and is designated as the standard web SSO solution across the Red Hat product portfolio. Several rsyslog options deprecated The rsyslog utility version in Red Hat Enterprise Linux 7.4 has deprecated a large number of options. These options no longer have any effect and cause a warning to be displayed. The functionality previously provided by the options -c , -u , -q , -x , -A , -Q , -4 , and -6 can be achieved using the rsyslog configuration. There is no replacement for the functionality previously provided by the options -l and -s . Deprecated symbols from the memkind library The following symbols from the memkind library have been deprecated: memkind_finalize() memkind_get_num_kind() memkind_get_kind_by_partition() memkind_get_kind_by_name() memkind_partition_mmap() memkind_get_size() MEMKIND_ERROR_MEMALIGN MEMKIND_ERROR_MALLCTL MEMKIND_ERROR_GETCPU MEMKIND_ERROR_PMTT MEMKIND_ERROR_TIEDISTANCE MEMKIND_ERROR_ALIGNMENT MEMKIND_ERROR_MALLOCX MEMKIND_ERROR_REPNAME MEMKIND_ERROR_PTHREAD MEMKIND_ERROR_BADPOLICY MEMKIND_ERROR_REPPOLICY Options of Sockets API Extensions for SCTP (RFC 6458) deprecated The options SCTP_SNDRCV , SCTP_EXTRCV and SCTP_DEFAULT_SEND_PARAM of Sockets API Extensions for the Stream Control Transmission Protocol have been deprecated per the RFC 6458 specification. New options SCTP_SNDINFO , SCTP_RCVINFO , SCTP_NXTINFO , and SCTP_DEFAULT_SNDINFO have been implemented as a replacement for the deprecated options.
Managing NetApp ONTAP using SSLv2 and SSLv3 is no longer supported by libstorageMgmt The SSLv2 and SSLv3 connections to the NetApp ONTAP storage array are no longer supported by the libstorageMgmt library. Users can contact NetApp support to enable the Transport Layer Security (TLS) protocol. dconf-dbus-1 has been deprecated and dconf-editor is now delivered separately With this update, the dconf-dbus-1 API has been removed. However, the dconf-dbus-1 library has been backported to preserve binary compatibility. Red Hat recommends using the GDBus library instead of dconf-dbus-1 . The dconf-error.h file has been renamed to dconf-enums.h . In addition, the dconf Editor is now delivered in the separate dconf-editor package; see Chapter 8, Desktop for more information. FreeRADIUS no longer accepts Auth-Type := System The FreeRADIUS server no longer accepts the Auth-Type := System option for the rlm_unix authentication module. This option has been replaced by the use of the unix module in the authorize section of the configuration file. Deprecated Device Drivers 3w-9xxx 3w-sas mptbase mptctl mptsas mptscsih mptspi mvsas qla3xxx The following controllers from the megaraid_sas driver have been deprecated: Dell PERC5, PCI ID 0x15 SAS1078R, PCI ID 0x60 SAS1078DE, PCI ID 0x7C SAS1064R, PCI ID 0x411 VERDE_ZCR, PCI ID 0x413 SAS1078GEN2, PCI ID 0x78 The following adapters from the qla2xxx driver have been deprecated: ISP24xx, PCI ID 0x2422 ISP24xx, PCI ID 0x2432 ISP2422, PCI ID 0x5422 QLE220, PCI ID 0x5432 QLE81xx, PCI ID 0x8001 QLE10000, PCI ID 0xF000 QLE84xx, PCI ID 0x8044 QLE8000, PCI ID 0x8432 QLE82xx, PCI ID 0x8021 The following Ethernet adapter controlled by the be2net driver has been deprecated: TIGERSHARK NIC, PCI ID 0x0700 The following controllers from the be2iscsi driver have been deprecated: Emulex OneConnect 10Gb iSCSI Initiator (generic), PCI ID 0x212 OCe10101, OCm10101, OCe10102, OCm10102 BE2 adapter family, PCI ID 0x702 OCe10100 BE2 adapter family, PCI ID 0x703 The following Emulex boards from the lpfc driver have been deprecated: BladeEngine 2 (BE2) Devices TIGERSHARK FCOE, PCI ID 0x0704 Fibre Channel (FC) Devices FIREFLY, PCI ID 0x1ae5 PROTEUS_VF, PCI ID 0xe100 BALIUS, PCI ID 0xe131 PROTEUS_PF, PCI ID 0xe180 RFLY, PCI ID 0xf095 PFLY, PCI ID 0xf098 LP101, PCI ID 0xf0a1 TFLY, PCI ID 0xf0a5 BSMB, PCI ID 0xf0d1 BMID, PCI ID 0xf0d5 ZSMB, PCI ID 0xf0e1 ZMID, PCI ID 0xf0e5 NEPTUNE, PCI ID 0xf0f5 NEPTUNE_SCSP, PCI ID 0xf0f6 NEPTUNE_DCSP, PCI ID 0xf0f7 FALCON, PCI ID 0xf180 SUPERFLY, PCI ID 0xf700 DRAGONFLY, PCI ID 0xf800 CENTAUR, PCI ID 0xf900 PEGASUS, PCI ID 0xf980 THOR, PCI ID 0xfa00 VIPER, PCI ID 0xfb00 LP10000S, PCI ID 0xfc00 LP11000S, PCI ID 0xfc10 LPE11000S, PCI ID 0xfc20 PROTEUS_S, PCI ID 0xfc50 HELIOS, PCI ID 0xfd00 HELIOS_SCSP, PCI ID 0xfd11 HELIOS_DCSP, PCI ID 0xfd12 ZEPHYR, PCI ID 0xfe00 HORNET, PCI ID 0xfe05 ZEPHYR_SCSP, PCI ID 0xfe11 ZEPHYR_DCSP, PCI ID 0xfe12 To check the PCI IDs of the hardware on your system, run the lspci -nn command. Note that other controllers from the mentioned drivers that are not listed here remain unchanged. SFN4XXX adapters have been deprecated Starting with Red Hat Enterprise Linux 7.4, SFN4XXX Solarflare network adapters have been deprecated. Previously, Solarflare had a single driver sfc for all adapters. Recently, support of SFN4XXX was split from sfc and moved into a new SFN4XXX-only driver, called sfc-falcon . Both drivers continue to be supported at this time, but sfc-falcon and SFN4XXX support is scheduled for removal in a future major release. 
Software initiated only FCoE storage technologies have been deprecated The software initiated only portion of Fibre Channel over Ethernet (FCoE) storage technology has been deprecated due to limited customer adoption. The software initiated only storage technology will remain supported for the life of Red Hat Enterprise Linux 7. The deprecation notice indicates the intention to remove software-initiated-based FCoE support in a future major release of Red Hat Enterprise Linux. It is important to note that the hardware support and the associated userspace tools (such as drivers, libfc , or libfcoe ) are unaffected by this deprecation notice. Containers using the libvirt-lxc tooling have been deprecated The following libvirt-lxc packages are deprecated since Red Hat Enterprise Linux 7.1: libvirt-daemon-driver-lxc libvirt-daemon-lxc libvirt-login-shell Future development on the Linux containers framework is now based on the docker command-line interface. libvirt-lxc tooling may be removed in a future release of Red Hat Enterprise Linux (including Red Hat Enterprise Linux 7) and should not be relied upon for developing custom container management applications. For more information, see the Red Hat KnowledgeBase article .
[ "KexAlgorithms=+diffie-hellman-group14-sha1,diffie-hellman-group-exchange-sha1", "echo 'LegacySigningMDs algorithm ' >> /etc/pki/tls/legacy-settings", "echo 'MinimumDHBits 768 ' > /etc/pki/tls/legacy-settings", "library= name=Policy NSS=flags=policyOnly,moduleDB config=\"allow=DH-MIN=767:DSA-MIN=767:RSA-MIN=767\"" ]
https://docs.redhat.com/en/documentation/Red_Hat_Enterprise_Linux/7/html/7.4_release_notes/chap-red_hat_enterprise_linux-7.4_release_notes-deprecated_functionality_in_rhel7
Chapter 1. Backing up Directory Server
Chapter 1. Backing up Directory Server A backup in Directory Server contains the following files: An LDIF file dse_index.ldif containing database indexed attributes An LDIF file dse_instance.ldif containing instance configuration attributes A directory for each backend, for example userRoot , which contains .db files for indexes defined in the database A transaction log file log.* A database version file DBVERSION Note that Directory Server does not support backing up individual databases. For details about backing up other important files, such as the configuration, see Backing up configuration files, the certificate database, and custom schema files . In contrast to a backup, you can export data as described in Exporting data from Directory Server . Use the export feature to export specific data from a server, such as a subtree, in LDIF format. 1.1. Backing up all databases using the command line while the instance is running To back up all databases of the Directory Server instance that is running, use the dsconf backup create command. Important Directory Server cleans the changelog when the database is restored from the online backup. Therefore, using online backup requires you to reinitialize the replica after the database restore. To avoid reinitialization, use the offline backup. Prerequisites The dirsrv user has write permissions in the destination directory. Note that Directory Server uses its own private directories by default. As a result, backups and exports under directories /var/tmp/ , /tmp/ , and /root/ fail unless you disabled the PrivateTmp systemd directive. The Directory Server instance is running. Procedure Back up all databases: # dsconf -D "cn= Directory Manager " ldap://server.example.com backup create The backup create task has finished successfully By default, dsconf stores the backup in a subdirectory called instance_name-YYYY_MM_DD_hh_mm_ss in the /var/lib/dirsrv/slapd- instance_name /bak/ directory. To specify a different location, append a directory name to the command. Search the /var/log/dirsrv/slapd- instance_name /errors log for problems during the backup. Additional resources To display all additional settings that you can use to back up data, see the output of the dsconf ldap://server.example.com backup create --help command. Backing up configuration files, the certificate database, and custom schema files Restoring all databases using the command line while the instance is running Exporting data from Directory Server 1.2. Backing up all databases using the command line while the instance is offline To back up databases when the Directory Server instance is offline, use the dsctl db2bak command. Prerequisites The dirsrv user has write permissions in the destination directory. Note that Directory Server uses its own private directories by default. As a result, backups and exports under directories /var/tmp/ , /tmp/ , and /root/ fail unless you disabled the PrivateTmp systemd directive. The Directory Server instance is not running. Procedure Back up all databases: By default, dsctl db2bak stores the backup in the instance_name-YYYY_MM_DD_hh_mm_ss subdirectory in the /var/lib/dirsrv/slapd- instance_name /bak/ directory. To specify a different location, append a directory name to the command. Optionally, pass the -v option to the command to display verbose output: Search the /var/log/dirsrv/slapd- instance_name /errors log for problems during the backup. 
Optional: Start the instance: Additional resources Backing up configuration files, the certificate database, and custom schema files Restoring all databases using the command line while the instance is offline Exporting data from Directory Server 1.3. Backing up all databases using the web console Directory Server supports data backup using the web console. Important Directory Server cleans the changelog when the database is restored from the online backup. Therefore, using online backup requires you to reinitialize the replica after the database restore. To avoid reinitialization, use the offline backup. Prerequisites The dirsrv user has write permissions in the destination directory. Note that Directory Server uses its own private directories by default. As a result, backups and exports under /var/tmp/ , /tmp/ , and /root/ directories fail unless you disabled the PrivateTmp systemd directive. You are logged in to the instance in the web console. Procedure Click the Actions button, and select Manage Backups . Click Create Backup . Enter a name for the backup, such as a time stamp to indicate the creation date and time of the backup. Click Create Backup . To check the log for problems during the backup, open the Monitoring Logging Errors Log menu. The server stores the backup in a subdirectory with the name you entered in the /var/lib/dirsrv/slapd- instance_name /bak/ directory. Additional resources Backing up configuration files, the certificate database, and custom schema files Restoring all databases using the web console Exporting data from Directory Server 1.4. Backing up configuration files, the certificate database, and custom schema files When you back up databases while the instance is online or offline, Directory Server also backs up configuration files, the certificate database, and custom schema files. The dsconf backup create and dsctl db2bak commands back up files to the /var/lib/dirsrv/slapd- instance_name /bak/ example_backup /config_files/ backup default directory. You might need these files to restore the instance on a different server after a hardware failure. Important During the backup, do not update the certificate database. Otherwise, this database might not be consistent in the backup. Procedure Perform backup of Directory Server while the instance is running or is offline as described in Backing up all databases using the command line while the instance is running or Backing up all databases using the command line while the instance is offline . Verification Find backed up configuration files in the backup directory: Note Directory Server does not automatically restore backed up configuration files. You need to restore these files manually. Additional resources Restoring configuration files, the certificate database, and custom schema files
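As a quick reference for the offline procedure above, a complete backup cycle to a custom destination can look like the following sketch. The instance name and destination directory are placeholders, and the dirsrv user must have write permissions in the destination directory as described in the prerequisites.
# dsctl instance_name stop
# dsctl instance_name db2bak /var/lib/dirsrv/slapd-instance_name/bak/manual_backup
# dsctl instance_name start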
[ "dsconf -D \"cn= Directory Manager \" ldap://server.example.com backup create The backup create task has finished successfully", "dsctl instance_name db2bak db2bak successful", "dsctl -v instance_name db2bak DEBUG: Instance allocated DEBUG: systemd status -> True INFO: db2bak successful", "dsctl instance_name start", "ls /var/lib/dirsrv/slapd- instance_name /bak/ example_backup /config_files/" ]
https://docs.redhat.com/en/documentation/red_hat_directory_server/12/html/backing_up_and_restoring_red_hat_directory_server/backing-up-directory-server_backing-up-and-restoring-rhds
Chapter 10. Troubleshooting CephFS PVC creation in external mode
Chapter 10. Troubleshooting CephFS PVC creation in external mode If you have updated the Red Hat Ceph Storage cluster from a version lower than 4.1.1 to the latest release, and it is not a freshly deployed cluster, you must manually set the application type for the CephFS pool on the Red Hat Ceph Storage cluster to enable CephFS PVC creation in external mode. Check for CephFS PVCs stuck in the Pending status. Example output : Check the describe output to see the events for the respective PVC. The expected error message is cephfs_metadata/csi.volumes.default/csi.volume.pvc-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx: (1) Operation not permitted) Example output: Check the settings for the <cephfs metadata pool name> (here cephfs_metadata ) and <cephfs data pool name> (here cephfs_data ). To run the command, you need jq preinstalled on the Red Hat Ceph Storage client node. Set the application type for the CephFS pools. Run the following commands on the Red Hat Ceph Storage client node: Verify that the settings are applied. Check the CephFS PVC status again. The PVC should now be in the Bound state. Example output :
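In addition to re-running the status check shown above, you can watch the PVC until it transitions to the Bound state. This is a minimal sketch that reuses the PVC name and namespace from the example output; substitute your own values.
oc get pvc ngx-fs-pxknkcix20-pod -n nginx-file -w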
[ "oc get pvc -n <namespace>", "NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE ngx-fs-pxknkcix20-pod Pending ocs-external-storagecluster-cephfs 28h [...]", "oc describe pvc ngx-fs-pxknkcix20-pod -n nginx-file", "Name: ngx-fs-pxknkcix20-pod Namespace: nginx-file StorageClass: ocs-external-storagecluster-cephfs Status: Pending Volume: Labels: <none> Annotations: volume.beta.kubernetes.io/storage-provisioner: openshift-storage.cephfs.csi.ceph.com Finalizers: [kubernetes.io/pvc-protection] Capacity: Access Modes: VolumeMode: Filesystem Mounted By: ngx-fs-oyoe047v2bn2ka42jfgg-pod-hqhzf Events: Type Reason Age From Message ---- ------ ---- ---- ------- Warning ProvisioningFailed 107m (x245 over 22h) openshift-storage.cephfs.csi.ceph.com_csi-cephfsplugin-provisioner-5f8b66cc96-hvcqp_6b7044af-c904-4795-9ce5-bf0cf63cc4a4 (combined from similar events): failed to provision volume with StorageClass \"ocs-external-storagecluster-cephfs\": rpc error: code = Internal desc = error (an error (exit status 1) occurred while running rados args: [-m 192.168.13.212:6789,192.168.13.211:6789,192.168.13.213:6789 --id csi-cephfs-provisioner --keyfile= stripped -c /etc/ceph/ceph.conf -p cephfs_metadata getomapval csi.volumes.default csi.volume.pvc-1ac0c6e6-9428-445d-bbd6-1284d54ddb47 /tmp/omap-get-186436239 --namespace=csi]) occurred, command output streams is ( error getting omap value cephfs_metadata/csi.volumes.default/csi.volume.pvc-1ac0c6e6-9428-445d-bbd6-1284d54ddb47: (1) Operation not permitted)", "ceph osd pool ls detail --format=json | jq '.[] | select(.pool_name| startswith(\"cephfs\")) | .pool_name, .application_metadata' \"cephfs_data\" { \"cephfs\": {} } \"cephfs_metadata\" { \"cephfs\": {} }", "ceph osd pool application set <cephfs metadata pool name> cephfs metadata cephfs", "ceph osd pool application set <cephfs data pool name> cephfs data cephfs", "ceph osd pool ls detail --format=json | jq '.[] | select(.pool_name| startswith(\"cephfs\")) | .pool_name, .application_metadata' \"cephfs_data\" { \"cephfs\": { \"data\": \"cephfs\" } } \"cephfs_metadata\" { \"cephfs\": { \"metadata\": \"cephfs\" } }", "oc get pvc -n <namespace>", "NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE ngx-fs-pxknkcix20-pod Bound pvc-1ac0c6e6-9428-445d-bbd6-1284d54ddb47 1Mi RWO ocs-external-storagecluster-cephfs 29h [...]" ]
https://docs.redhat.com/en/documentation/red_hat_openshift_data_foundation/4.13/html/troubleshooting_openshift_data_foundation/troubleshooting-cephfs-pvc-creation-in-external-mode_rhodf
Chapter 323. Spring Redis Component
Chapter 323. Spring Redis Component Available as of Camel version 2.11 This component allows sending and receiving messages from Redis . Redis is advanced key-value store where keys can contain strings, hashes, lists, sets and sorted sets. In addition it provides pub/sub functionality for inter-app communications. Camel provides a producer for executing commands, consumer for subscribing to pub/sub messages an idempotent repository for filtering out duplicate messages. INFO:*Prerequisites* In order to use this component, you must have a Redis server running. 323.1. URI Format spring-redis://host:port[?options] You can append query options to the URI in the following format, ?options=value&option2=value&... 323.2. URI Options The Spring Redis component has no options. The Spring Redis endpoint is configured using URI syntax: with the following path and query parameters: 323.2.1. Path Parameters (2 parameters): Name Description Default Type host Required The host where Redis server is running. String port Required Redis server port number Integer 323.2.2. Query Parameters (10 parameters): Name Description Default Type channels (common) List of topic names or name patterns to subscribe to. Multiple names can be separated by comma. String command (common) Default command, which can be overridden by message header. Notice the consumer only supports the following commands: PSUBSCRIBE and SUBSCRIBE SET Command connectionFactory (common) Reference to a pre-configured RedisConnectionFactory instance to use. RedisConnectionFactory redisTemplate (common) Reference to a pre-configured RedisTemplate instance to use. RedisTemplate serializer (common) Reference to a pre-configured RedisSerializer instance to use. RedisSerializer bridgeErrorHandler (consumer) Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. false boolean exceptionHandler (consumer) To let the consumer use a custom ExceptionHandler. Notice if the option bridgeErrorHandler is enabled then this option is not in use. By default the consumer will deal with exceptions, that will be logged at WARN or ERROR level and ignored. ExceptionHandler exchangePattern (consumer) Sets the exchange pattern when the consumer creates an exchange. ExchangePattern listenerContainer (consumer) Reference to a pre-configured RedisMessageListenerContainer instance to use. RedisMessageListener Container synchronous (advanced) Sets whether synchronous processing should be strictly used, or Camel is allowed to use asynchronous processing (if supported). false boolean 323.3. Spring Boot Auto-Configuration The component supports 2 options, which are listed below. Name Description Default Type camel.component.spring-redis.enabled Enable spring-redis component true Boolean camel.component.spring-redis.resolve-property-placeholders Whether the component should resolve property placeholders on itself when starting. Only properties which are of String type can use property placeholders. true Boolean 323.4. Usage See also the unit tests available at https://github.com/apache/camel/tree/master/components/camel-spring-redis/src/test/java/org/apache/camel/component/redis . 323.4.1. 
Message headers evaluated by the Redis producer The producer issues commands to the server and each command has different set of parameters with specific types. The result from the command execution is returned in the message body. Hash Commands Description Parameters Result HSET Set the string value of a hash field CamelRedis.Key (String), CamelRedis.Field (String), CamelRedis.Value (Object) void HGET Get the value of a hash field CamelRedis.Key (String), CamelRedis.Field (String) String HSETNX Set the value of a hash field, only if the field does not exist CamelRedis.Key (String), CamelRedis.Field (String), CamelRedis.Value (Object) void HMSET Set multiple hash fields to multiple values CamelRedis.Key (String), CamelRedis.Values(Map<String, Object>) void HMGET Get the values of all the given hash fields CamelRedis.Key (String), CamelRedis.Fields (Collection<String>) Collection<Object> HINCRBY Increment the integer value of a hash field by the given number CamelRedis.Key (String), CamelRedis.Field (String), CamelRedis.Value (Long) Long HEXISTS Determine if a hash field exists CamelRedis.Key (String), CamelRedis.Field (String) Boolean HDEL Delete one or more hash fields CamelRedis.Key (String), CamelRedis.Field (String) void HLEN Get the number of fields in a hash CamelRedis.Key (String) Long HKEYS Get all the fields in a hash CamelRedis.Key (String) Set<String> HVALS Get all the values in a hash CamelRedis.Key (String) Collection<Object> HGETALL Get all the fields and values in a hash CamelRedis.Key (String) Map<String, Object> List Commands Description Parameters Result RPUSH Append one or multiple values to a list CamelRedis.Key (String), CamelRedis.Value (Object) Long RPUSHX Append a value to a list, only if the list exists CamelRedis.Key (String), CamelRedis.Value (Object) Long LPUSH Prepend one or multiple values to a list CamelRedis.Key (String), CamelRedis.Value (Object) Long LLEN Get the length of a list CamelRedis.Key (String) Long LRANGE Get a range of elements from a list CamelRedis.Key (String), CamelRedis.Start (Long), CamelRedis.End (Long) List<Object> LTRIM Trim a list to the specified range CamelRedis.Key (String), CamelRedis.Start (Long), CamelRedis.End (Long) void LINDEX Get an element from a list by its index CamelRedis.Key (String), CamelRedis.Index (Long) String LINSERT Insert an element before or after another element in a list CamelRedis.Key (String), CamelRedis.Value (Object), CamelRedis.Pivot (String), CamelRedis.Position (String) Long LSET Set the value of an element in a list by its index CamelRedis.Key (String), CamelRedis.Value (Object), CamelRedis.Index (Long) void LREM Remove elements from a list CamelRedis.Key (String), CamelRedis.Value (Object), CamelRedis.Count (Long) Long LPOP Remove and get the first element in a list CamelRedis.Key (String) Object RPOP Remove and get the last element in a list CamelRedis.Key (String) String RPOPLPUSH Remove the last element in a list, append it to another list and return it CamelRedis.Key (String), CamelRedis.Destination (String) Object BRPOPLPUSH Pop a value from a list, push it to another list and return it; or block until one is available CamelRedis.Key (String), CamelRedis.Destination (String), CamelRedis.Timeout (Long) Object BLPOP Remove and get the first element in a list, or block until one is available CamelRedis.Key (String), CamelRedis.Timeout (Long) Object BRPOP Remove and get the last element in a list, or block until one is available CamelRedis.Key (String), CamelRedis.Timeout (Long) String Set Commands 
Description Parameters Result SADD Add one or more members to a set CamelRedis.Key (String), CamelRedis.Value (Object) Boolean SMEMBERS Get all the members in a set CamelRedis.Key (String) Set<Object> SREM Remove one or more members from a set CamelRedis.Key (String), CamelRedis.Value (Object) Boolean SPOP Remove and return a random member from a set CamelRedis.Key (String) String SMOVE Move a member from one set to another CamelRedis.Key (String), CamelRedis.Value (Object), CamelRedis.Destination (String) Boolean SCARD Get the number of members in a set CamelRedis.Key (String) Long SISMEMBER Determine if a given value is a member of a set CamelRedis.Key (String), CamelRedis.Value (Object) Boolean SINTER Intersect multiple sets CamelRedis.Key (String), CamelRedis.Keys (String) Set<Object> SINTERSTORE Intersect multiple sets and store the resulting set in a key CamelRedis.Key (String), CamelRedis.Keys (String), CamelRedis.Destination (String) void SUNION Add multiple sets CamelRedis.Key (String), CamelRedis.Keys (String) Set<Object> SUNIONSTORE Add multiple sets and store the resulting set in a key CamelRedis.Key (String), CamelRedis.Keys (String), CamelRedis.Destination (String) void SDIFF Subtract multiple sets CamelRedis.Key (String), CamelRedis.Keys (String) Set<Object> SDIFFSTORE Subtract multiple sets and store the resulting set in a key CamelRedis.Key (String), CamelRedis.Keys (String), CamelRedis.Destination (String) void SRANDMEMBER Get one or multiple random members from a set CamelRedis.Key (String) String Ordered set Commands Description Parameters Result ZADD Add one or more members to a sorted set, or update its score if it already exists CamelRedis.Key (String), CamelRedis.Value (Object), CamelRedis.Score (Double) Boolean ZRANGE Return a range of members in a sorted set, by index CamelRedis.Key (String), CamelRedis.Start (Long), CamelRedis.End (Long), CamelRedis.WithScore (Boolean) Object ZREM Remove one or more members from a sorted set CamelRedis.Key (String), CamelRedis.Value (Object) Boolean ZINCRBY Increment the score of a member in a sorted set CamelRedis.Key (String), CamelRedis.Value (Object), CamelRedis.Increment (Double) Double ZRANK Determine the index of a member in a sorted set CamelRedis.Key (String), CamelRedis.Value (Object) Long ZREVRANK Determine the index of a member in a sorted set, with scores ordered from high to low CamelRedis.Key (String), CamelRedis.Value (Object) Long ZREVRANGE Return a range of members in a sorted set, by index, with scores ordered from high to low CamelRedis.Key (String), CamelRedis.Start (Long), CamelRedis.End (Long), CamelRedis.WithScore (Boolean) Object ZCARD Get the number of members in a sorted set CamelRedis.Key (String) Long ZCOUNT Count the members in a sorted set with scores within the given values CamelRedis.Key (String), CamelRedis.Min (Double), CamelRedis.Max (Double) Long ZRANGEBYSCORE Return a range of members in a sorted set, by score CamelRedis.Key (String), CamelRedis.Min (Double), CamelRedis.Max (Double) Set<Object> ZREVRANGEBYSCORE Return a range of members in a sorted set, by score, with scores ordered from high to low CamelRedis.Key (String), CamelRedis.Min (Double), CamelRedis.Max (Double) Set<Object> ZREMRANGEBYRANK Remove all members in a sorted set within the given indexes CamelRedis.Key (String), CamelRedis.Start (Long), CamelRedis.End (Long) void ZREMRANGEBYSCORE Remove all members in a sorted set within the given scores CamelRedis.Key (String), CamelRedis.Start (Long), CamelRedis.End (Long) void 
ZUNIONSTORE Add multiple sorted sets and store the resulting sorted set in a new key CamelRedis.Key (String), CamelRedis.Keys (String), CamelRedis.Destination (String) void ZINTERSTORE Intersect multiple sorted sets and store the resulting sorted set in a new key CamelRedis.Key (String), CamelRedis.Keys (String), CamelRedis.Destination (String) void String Commands Description Parameters Result SET Set the string value of a key CamelRedis.Key (String), CamelRedis.Value (Object) void GET Get the value of a key CamelRedis.Key (String) Object STRLEN Get the length of the value stored in a key CamelRedis.Key (String) Long APPEND Append a value to a key CamelRedis.Key (String), CamelRedis.Value (String) Integer SETBIT Sets or clears the bit at offset in the string value stored at key CamelRedis.Key (String), CamelRedis.Offset (Long), CamelRedis.Value (Boolean) void GETBIT Returns the bit value at offset in the string value stored at key CamelRedis.Key (String), CamelRedis.Offset (Long) Boolean SETRANGE Overwrite part of a string at key starting at the specified offset CamelRedis.Key (String), CamelRedis.Value (Object), CamelRedis.Offset (Long) void GETRANGE Get a substring of the string stored at a key CamelRedis.Key (String), CamelRedis.Start (Long), CamelRedis.End (Long) String SETNX Set the value of a key, only if the key does not exist CamelRedis.Key (String), CamelRedis.Value (Object) Boolean SETEX Set the value and expiration of a key CamelRedis.Key (String), CamelRedis.Value (Object), CamelRedis.Timeout (Long), SECONDS void DECRBY Decrement the integer value of a key by the given number CamelRedis.Key (String), CamelRedis.Value (Long) Long DECR Decrement the integer value of a key by one CamelRedis.Key (String), Long INCRBY Increment the integer value of a key by the given amount CamelRedis.Key (String), CamelRedis.Value (Long) Long INCR Increment the integer value of a key by one CamelRedis.Key (String) Long MGET Get the values of all the given keys CamelRedis.Fields (Collection<String>) List<Object> MSET Set multiple keys to multiple values CamelRedis.Values(Map<String, Object>) void MSETNX Set multiple keys to multiple values, only if none of the keys exist CamelRedis.Key (String), CamelRedis.Value (Object) void GETSET Set the string value of a key and return its old value CamelRedis.Key (String), CamelRedis.Value (Object) Object Key Commands Description Parameters Result EXISTS Determine if a key exists CamelRedis.Key (String) Boolean DEL Delete a key CamelRedis.Keys (String) void TYPE Determine the type stored at key CamelRedis.Key (String) DataType KEYS Find all keys matching the given pattern CamelRedis.Pattern (String) Collection<String> RANDOMKEY Return a random key from the keyspace CamelRedis.Pattern (String), CamelRedis.Value (String) String RENAME Rename a key CamelRedis.Key (String) void RENAMENX Rename a key, only if the new key does not exist CamelRedis.Key (String), CamelRedis.Value (String) Boolean EXPIRE Set a key's time to live in seconds CamelRedis.Key (String), CamelRedis.Timeout (Long) Boolean SORT Sort the elements in a list, set or sorted set CamelRedis.Key (String) List<Object> PERSIST Remove the expiration from a key CamelRedis.Key (String) Boolean EXPIREAT Set the expiration for a key as a UNIX timestamp CamelRedis.Key (String), CamelRedis.Timestamp (Long) Boolean PEXPIRE Set a key's time to live in milliseconds CamelRedis.Key (String), CamelRedis.Timeout (Long) Boolean PEXPIREAT Set the expiration for a key as a UNIX timestamp specified in milliseconds 
CamelRedis.Key (String), CamelRedis.Timestamp (Long) Boolean TTL Get the time to live for a key CamelRedis.Key (String) Long MOVE Move a key to another database CamelRedis.Key (String), CamelRedis.Db (Integer) Boolean Other Command Description Parameters Result MULTI Mark the start of a transaction block none void DISCARD Discard all commands issued after MULTI none void EXEC Execute all commands issued after MULTI none void WATCH Watch the given keys to determine execution of the MULTI/EXEC block CamelRedis.Keys (String) void UNWATCH Forget about all watched keys none void ECHO Echo the given string CamelRedis.Value (String) String PING Ping the server none String QUIT Close the connection none void PUBLISH Post a message to a channel CamelRedis.Channel (String), CamelRedis.Message (Object) void 323.5. Dependencies Maven users will need to add the following dependency to their pom.xml. pom.xml <dependency> <groupId>org.apache.camel</groupId> <artifactId>camel-spring-redis</artifactId> <version>USD{camel-version}</version> </dependency> where USD{camel-version } must be replaced by the actual version of Camel (2.11 or higher). 323.6. See Also Configuring Camel Component Endpoint Getting Started
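Because the component requires a running Redis server (see the prerequisites above), it can be useful to confirm connectivity from the Camel host before wiring up any routes. The following is a hedged sketch that assumes the redis-cli client is installed and that the server listens on the default host and port; a healthy server replies with PONG.
redis-cli -h localhost -p 6379 ping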
[ "spring-redis://host:port[?options]", "spring-redis:host:port", "<dependency> <groupId>org.apache.camel</groupId> <artifactId>camel-spring-redis</artifactId> <version>USD{camel-version}</version> </dependency>" ]
https://docs.redhat.com/en/documentation/red_hat_fuse/7.13/html/apache_camel_component_reference/spring-redis-component
Bare Metal Provisioning
Bare Metal Provisioning Red Hat OpenStack Platform 16.0 Install, Configure, and Use the Bare Metal Service (Ironic) OpenStack Documentation Team [email protected]
null
https://docs.redhat.com/en/documentation/red_hat_openstack_platform/16.0/html/bare_metal_provisioning/index
Chapter 4. Release Information
Chapter 4. Release Information These release notes highlight technology preview items, recommended practices, known issues, and deprecated functionality to be taken into consideration when deploying this release of Red Hat Virtualization. Notes for updates released during the support lifecycle of this Red Hat Virtualization release will appear in the advisory text associated with each update or the Red Hat Virtualization Technical Notes . This document is available from the following page: https://access.redhat.com/documentation/en-us/red_hat_virtualization 4.1. Red Hat Virtualization 4.3 General Availability (ovirt-4.3.3) 4.1.1. Bug Fix The items listed in this section are bugs that were addressed in this release: BZ# 1403675 Previously, Red Hat Virtualization Manager did not handle hosts added to it over an IPV6-only network. In the current release, you can use the Manager's Administration Portal and REST API to add and manage hosts over a statically configured IPV6-only network. BZ# 1441741 In the current release, the v4 API documentation shows how to retrieve the IP addresses of a virtual machine. BZ# 1496395 Previously, memory hot unplug did not work in virtual machines started from snapshots. This has been fixed in the current release: Memory hot unplug works in virtual machines started from snapshots. BZ# 1507965 Previously, selecting File > Change CD in the Windows 10 version of virt-viewer did not work. The current release fixes this issue. BZ# 1520848 This release updates the VM video RAM settings to ensure enough RAM is present for any Linux guest operating system. BZ# 1536397 Previously, CloudInit passed the dns_search value incorrectly as the dns_namesever value. For example, after configuring a the Networks settings of a virtual machine and runinng it, the dns_search value showed up in the resolv.conf file as the dns_namesever value. The current release fixes this issue. BZ# 1539766 Previously, when accessing RHEL 6 virtual machines from a Windows 7 client using virt-viewer, copy/paste sporadically failed. The current release fixes this issue. BZ# 1548846 Previously a "Removed device not found in conf" warning appeared in the vdsm.log after performing a successful hot unplug. In this release, after performing a successful hot unplug, this warning message will no longer appear in vdsm.log. BZ# 1552533 This release renames the 'MaxBlockDiskSize' option to 'MaxBlockDiskSizeInGibiBytes'. BZ# 1559041 Previously, guest virtual machines with USB support enabled became unresponsive after migration to a different host. The current release fixes this issue and guests work as expected after migration. BZ# 1560460 Previously, VDSM used stat() to implement islink() checks when using ioprocess to run commands. As a result, if a user or storage system created a recursive symbolic link inside the ISO storage domain, VDSM failed to report file information. In the current release, VDSM uses lstat() to implement islink() so it can report file information from recursive symbolic links. BZ# 1561964 The serial console was missing in a self-hosted engine VM created with node zero deployment. In this release, the serial console is defined correctly. BZ# 1565178 Previously, you could manage snapshots through the Administration Portal, but not in the VM Portal. In the current release, you can manage snapshots through the VM portal. BZ# 1567936 Previously, the ovirt-cockpit-sso configuration file, cockpit.conf, triggered security and integrity alerts during the verification process. 
In the current release, the ovirt-cockpit-sso configuration file is marked as a configuration file and is excluded from the verification process, which helps prevent false security and integrity alerts. BZ# 1570851 Previously, on a Windows client machine, if a non-English locale was selected, the spice client (remote-viewer) displayed some translated UI elements in English, not the locale language. The current release fixes this and presents those translated UI elements in the locale language. BZ# 1575777 Previously, a floppy drive in a virtual machine could prevent the virtual machine from being imported. In the current release, floppy drives are ignored during import. BZ# 1583038 A VDSM yum plugin named 'vdsm.py' was added. Consequently, the Self-Hosted Engine setup imported the wrong vdsm module, causing it to fail. The name of the plugin was changed and now the Self-Hosted Engine setup completes successfully. BZ# 1583968 The self-hosted engine VM was selected for balancing although the BalanceVM command was not enabled for the self-hosted engine. In this release, balancing is no longer blocked. BZ# 1589612 When a virtual machine starts, VDSM uses the domain metadata section to store data which is required to configure a virtual machine but which is not adequately represented by the standard libvirt domain. Previously, VDSM stored drive IO tune settings in this metadata that were redundant because they already had proper representation in the libvirt domain. Furthermore, if IO tune settings were enabled, a bug in storing the IO tune settings prevented the virtual machine from starting. The current release removes the redundant information from the domain metadata and fixes the bug that prevented virtual machines from starting. BZ# 1591693 Do not use a VNC-based connection to deploy Red Hat Virtualization Manager as a self-hosted engine. The VNC protocol does not support password auth in FIPS mode. As a result, the self-hosted engine will fail to deploy. Instead, deploy the Manager as a self-hosted engine, use a SPICE-based connection. BZ# 1593568 Previously, if a CD-ROM was ejected from a virtual machine and VDSM was fenced or restarted, the virtual machine became unresponsive and/or the Manager reported its status as "Unknown." In the current release, a virtual machine with an ejected CD-ROM recovers after restarting VDSM. BZ# 1594615 The release improves upon the fix in BZ#1518253 to allow for a faster abort process and a more easily understood error message. BZ# 1595285 There was a bug in the REST API for non-administrator users related to VNIC Profiles. Consequently, an error message appeared saying "GET_ALL_VNIC_PROFILES failed query execution failed due to insufficient permissions." The code was fixed and the error no longer occurs. BZ# 1595489 This release ensures that VMs existing in Red Hat Virtualization Manager version 4.2.3 or earlier do not lose their CD-ROM device if the VMs are restarted in 4.2.3 or later versions. BZ# 1608093 Previously, with some error conditions, the VM Portal displayed a completely white screen with no error message or debugging information. The current release fixes this issue: All error conditions display an error message and stack trace in the browser console. BZ# 1614430 Vdsm-gluster tries to run heal operations on all volumes. Previously, if the gluster commands got stuck, VDSM started waiting indefinitely for them, exhausting threads, until it timed-out. Then it stopped communicating with the Manager and went offline. 
The current release adds a timeout to the gluster heal info command so the command terminates within a set timeout and threads do not become exhausted. On timeout, the system issues a GlusterCommandTimeoutException, which causes the command to exit and notifies the Manager. As a result, VDSM threads are not stuck, and VDSM does not go offline. BZ# 1617745 Previously, when a migrating virtual machine was not properly set up on the destination host, it could still start there under certain circumstances, then run unnoticed and without VDSM supervision. This situation sometimes resulted in split-brain. Now migration is always prevented from starting if the virtual machine set up fails on the destination host. BZ# 1619154 Previously, while a datacenter was enforcing a quota, using the VM Portal to create a virtual machine from a blank template generated an error. The current release fixes this issue. BZ# 1619474 This release ensures that if a request occurs to disable I/O threads of a running VM, the I/O threads disable when the VM goes down. BZ# 1619866 This release ensures that if a request occurs to disable I/O threads of a running VM, the I/O threads setting remains disabled when changing unrelated properties of a running VM. BZ# 1622068 Previously, after importing a guest from an ova file, the Import Virtual Machine dialog displayed the network type as "Dual-mode rt8319, VirtIO", when it should have been only "VirtIO". The current release fixes this issue. BZ# 1626907 This release prevents VM snapshot creation when the VM is in a non-responding state to preclude database corruption due to an inconsistent image structure. BZ# 1628836 This fix allows the self-hosted engine virtual machine to run on the host. BZ# 1631360 The release changed the system manufacturer of virtual machines from "Red Hat" to "oVirt". This was inconsistent with preceding versions. Some users depended on this field to determine the underlying hypervisor. The current release fixes this issue by setting the SMBIOS manufacturer according to the product being used, which is indicated by the 'OriginType' configuration value. As a result, the manufacturer is set to 'oVirt' when oVirt is being used, and 'Red Hat' when Red Hat Virtualization is being used. BZ# 1631392 Previously, in the Administration Portal, the "New Pool" window uses the "Prestarted" label while the "Edit Pool" window uses the "Prestarted VMs" label. Both of these labels refer to the number of VMs prestarted in the pool. The current release fixes this issue. BZ# 1632055 This release updates the Red Hat Virtualization Manager power saving policy to allow VM migration from over-utilized hosts to under-utilized hosts to ensure proper balancing. BZ# 1633975 RHVH was missing a package named pam_pkcs11. Consequently, the rule for pam_pkcs11 in PAM is added, but the module does not exist, so users cannot login. The missing pam_pkcs11 package was added, and now users can login to RHVH if the correct security profile is applied. BZ# 1634239 oscap-anaconda-addon was changed to read the datastream file based on the OS name and version. Consequently, the addon looks for a datastream file named "ssg-rhvh4-ds.xml," which does not exist, so no OSCAP profiles are shown. The relevant OSCAP profiles for RHVH reside in ssg-rhel7-ds.xml, so a symlink was added named ssg-rhvh4-ds.xml that references ssg-rhel7-ds.xml. 
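To confirm on an updated Red Hat Virtualization Host that the symlink described above is in place, you can list it directly. The content path shown is the usual scap-security-guide location and is an assumption; adjust it if the content files are installed elsewhere on your host.
ls -l /usr/share/xml/scap/ssg/content/ssg-rhvh4-ds.xml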
BZ# 1635304 This release allows users in Red Hat Virtualization Manager to view the full path of the host group in the host group drop-down list to facilitate host group configuration. BZ# 1635405 This release adds a log entry at the WARN level if an attempt is made to move a disk with a damaged ancestor. A workaround solution is to leverage the REST API to move the disk between storage domains. BZ# 1635845 This release ensures the clearing of the VM uptime during a guest operating system reboot, and the uptime that does display corresponds to the guest operating system. BZ# 1635942 Previously, while cloning a virtual machine with a Direct LUN attached, the Administration Portal showed the clone task as red (failed). The current release fixes this issue and displays the clone task as running until it is complete. BZ# 1636028 Previously, Red Hat Virtualization Host entered emergency mode when it was updated to the latest version and rebooted twice. This was due to the presence of a local disk WWID in /etc/multipath/wwids. In the current release, /etc/multipath/wwids has been removed. During upgrades, imgbased now calls "vdsm-tool configure --force" in the new layer, using the SYSTEMD_IGNORE_CHROOT environment variable. BZ# 1636331 Previously, trying to update a disk attribute using the /api/disks/{disk_id} API failed without an error. The current release fixes this issue. BZ# 1637765 Previously, when deploying the Self-Hosted Engine from the Cockpit, an error message appeared together with an explanation that ping issues were encountered. However, under certain conditions the explanation would disappear, leaving only a generic error message "Please correct errors before moving to the step." In this release, if ping errors are encountered during deployment, the user will be informed of the issue, and the message will remain in the error window until the issue is resolved. BZ# 1638096 The self-hosted engine backup and restore flow has been improved, and now works correctly when the self-hosted engine storage domain is defined as the master storage domain. BZ# 1638124 This release enables VM configuration with memory greater than two terabytes. BZ# 1638606 Previously, the default ntp.conf file was migrated to chrony even when NTP was disabled, overwriting chrony.conf file with incorrect values. In the current release, ntp.conf is only migrated if NTP is enabled. BZ# 1639630 This release sets the proper REST API parameters during the VM creation to allow and make the VM available for use immediately. BZ# 1640016 The default CPU type in Red Hat Virtualization 4.2 is deprecated in Red Hat Virtualization 4.3. Previously, when you used the Edit Cluster dialog to create a new cluster or edit an existing cluster, changing the cluster compatibility version from 4.2 to 4.3 when the CPU Architecture was set to x86_64 caused the CPU Type to be set to an invalid setting, resulting in an exception. Now the CPU Type defaults to a valid entry and no exception occurs. BZ# 1640977 This release ensures that all values for Quality of Service links are visible. BZ# 1641536 Previously, after using virt-v2v to import a virtual machine from Xen or VMware environments, the Red Hat Virtualization Manager was incorrectly removing the virtual machine. The Manager was removing the import job too early, causing it to remove the job reference twice. This issue has been fixed in the current release. The Manager only removes the import job after processing is complete. 
Using virt-v2v to import virtual machines from VmWare and Xen works. BZ# 1643476 Migration bandwidth limit was computed incorrectly from the user-defined settings and set to an incorrect value. In this release, the migration bandwidth limit is now set correctly. BZ# 1643486 This release ensures the value of the migration bandwidth limit is correct. BZ# 1643663 When performing an upgrade, make sure the ovirt-hosted-engine-ha and ovirt-hosted-engine-setup package versions match. BZ# 1643733 Previously, after performing an upgrade, packages that shipped files under /var were not updated correctly as /var was not layered. In this release, if an updated file exists on both the new image and the running system, the original file will be saved as ".imgbak" and the new file will be copied over enabling both the original and new files to reside under /var. BZ# 1643743 Host logs were being filled with vnc_tls errors due to problems with read permissions. In this release, the erroneous logs are no longer recorded by the host. BZ# 1644636 Previously, incorrect parsing of images named 'rhv-toolssetup_x.x_x.iso' caused a NullPointerException (NPE). The current release fixes this issue. This image name can be parsed without causing an exception. BZ# 1645007 Previously, making an API call to the foreman (hosts, hostgroups, compute resources) returned only 20 entries. The current release fixes this issue and displays all of the entries. BZ# 1645395 Previously, imgbased failed upon receiving the e2fsck return code 1 when creating a new layer. In the current release, imgbased handles the e2fsck return code 1 as a success, since the new file system is correct and the new layer is installed successfully. BZ# 1646861 This release ensures Red Hat Virtualization Manager sets the recommended options during the creation of a volume from Red Hat Virtualization Manager to distinguish creating volumes from the Cockpit User Interface. BZ# 1647607 Previously, an incorrectly named USB3 controller, "qemu_xhci," prevented virtual machines from booting if they used a host passthrough with this controller. The current release corrects the controller name to "qemu-xhci," which resolves the booting issue. BZ# 1650177 This release ensures the upgrade process in Red Hat Virtualization Manager sets the configuration value ImageProxyAddress to point to the Red Hat Virtualization Manager FQDN if the configuration value was set to "localhost". BZ# 1650422 Red Hat Virtualization Manager no longer logs messages regarding non-preferred host penalizations if the VM is not configured to have a preferred host. BZ# 1651426 Previously when converting to OpenStack, failed conversions revealed passwords for accessing OpenStack in the wrapper log. This issue has been fixed and passwords are no longer revealed. BZ# 1652519 Previously, during an upgrade, dracut running inside chroot did not detect the cpuinfo and the kernel config files because /proc was not mounted and /boot was bindmounted. As a result, the correct microcode was missing from the initramfs. The current release bindmounts /proc to the chroot and removes the --hostonly flag. This change inserts both AMD and Intel microcodes into the initramfs and boots the host after an upgrade. BZ# 1652795 Previously, even if lvmetad was disabled in the configuration, the lvmetad service left a pid file hanging. As a result, entering lvm commands displayed warnings. The current release masks the lvmetad service during build so it never starts and lvm commands do not show warnings. 
BZ# 1654417 Previously, if an xlease volume was corrupted, VDSM could not acquire leases and features like high-availability virtual machines did not work. The current release adds rebuild-xleases and format-xleases commands to the VDSM tool. Administrators can use these commands to rebuild or format corrupted xlease volumes. BZ# 1654442 There was a bug in the REST API for non-administrator users related to VNIC Profiles. Consequently, an error message appeared saying "GET_ALL_VNIC_PROFILES failed query execution failed due to insufficient permissions." The code was fixed and the error no longer occurs. BZ# 1655375 Previously, after upgrading to version 4.2 or 4.3, the Compute > Hosts > Network Interfaces page in the Administration Portal did not display host interfaces. Instead, it would throw the following obfuscated exception several times: webadmin-0.js:formatted:176788 Mon Dec 03 11:46:02 GMT+1000 2018 SEVERE: Uncaught exception com.google.gwt.core.client.JavaScriptException: (TypeError) : Cannot read property 'a' of null The current release fixes this issue. BZ# 1655911 In this release, the following changes have been made in the view filters for VMs in the Administration Portal under Compute > Hosts > selected host: New view filter names: - From "Running on host" to "Running on current host" (default view) - From "Pinned to host" to "Pinned to current host" - From "All" to "Both" - when "Both" is selected, a new column named "Attachment to current host" is displayed to indicate that the VM is: "Running on current host" , "Pinned to current host", or "Pinned and Running on current host". BZ# 1656092 Previously, when re-importing a virtual machine as an OVA file, duplicate Image IDs and Disk IDs caused errors while attempting to recreate the image. Also, after a failure, continuing to attempt to attach the image instead of failing immediately caused the error reported. Because the identifiers already existed, Red Hat Virtual Manager could not import the virtual machine OVA file even though the virtual machine name had been changed. This issue has been fixed in the current release. The Red Hat Virtual Manager can regenerate Identifiers. When copying the image, the Manager can use image mapping to correlate the Image ID to the new Image ID. Finally, the Manager can move the attach image handling so that it will not be called when creating a new image if the database fails. As a result, importing virtual machines using OVA files works. BZ# 1657977 Previously, the "Multi Queues enabled" checkbox was missing from the New- or Edit Instance Types window in the Administration Portal. The current release fixes this issue. BZ# 1658976 This bug fix sets the template ID properly to address a null pointer exception during the import of a thin-provisioned VM disk from an Open Virtualization Framework configuration file. BZ# 1660441 This release ensures Red Hat Virtualization Manager defines the attribute subjectAlternativeName correctly during the renaming of the httpd certificate to prevent browser warnings or a certificate rejection. BZ# 1660595 During a self-hosted engine deployment, SSO authentication errors may occur stating that a valid profile cannot be found in credentials and to check the logs for more details. The interim workaround is to retry the authentication attempt more than once. See BZ#1695523 for a specific example involving Kerberos SSO and engine-backup. 
BZ# 1662321 Previously, when trying to clone a virtual machine from an Active VM snapshot, a 'Failed to get shared "write" lock. Is another process using the image?' error appeared for the following snapshot types: 'ACTIVE', 'STATELESS', 'PREVIEW' and 'NEXT_RUN'. In this release, the cloning operation will be blocked for these snapshot types. BZ# 1662449 If a user with an invalid sudo configuration uses sudo to run commands, sudo appends a "last login" message to the command output. When this happens, VDSM fails to run lvm commands. Previously, the VDSM log did not contain helpful information about what caused those failures. The current release improves error handling in the VDSM code running lvm commands. Now, if VDSM fails, an error message clearly states that there was invalid output from the lvm commands, and shows the output added by sudo. Although this change does not fix the root cause, an invalid sudo configuration, it makes it easier to understand the issue. BZ# 1662878 Self-hosted engine deployment failed when the network interface was defined as other than 'eth0'. In this release, any valid network interface name can be used. BZ# 1663616 In this release, redirection device types are no longer set to unplugged and can now obtain the proper address from the domain xml when supported or from the host when they are not supported. BZ# 1664342 The sorting order in the list of Disks in the Storage tab of the Administration Portal was sorted alphabetically by text values in the Creation Date, instead of by time stamp. In this release, the list is now sorted by the time stamp. BZ# 1664540 A user with a UserRole or a role with a Change CD permit can now change CDs on running VMs in the VM Portal BZ# 1666886 This release updates the Ansible role to configure the Rsyslog Elasticsearch output correctly to ensure the certificate information reaches the Red Hat Virtualization Host. BZ# 1666958 This release ensures the SR-IOV vNIC profile does not undergo an invalid update while the vNIC is plugged in and running on the VM during the validation process. To update the SR-IOV vNIC profile, unplug the vNIC from the VM. After the updates are complete, replug the vNIC into the VM. BZ# 1670370 VDSM attempted to collect OpenStack related information, even on hosts that are not connected to OpenStack, and displayed a repeated error message in the system log. In this release, errors originating from OpenStack related information are not recorded in the system log. As a result, the system log is quieter. BZ# 1674477 Previously, testing of Ansible 2.8 returned deprecation errors and warnings during deployment. The current release fixes this issue. BZ# 1676426 Previously, the Self-Hosted Engine pane in Cockpit had a few minor typos. The current version fixes these issues. BZ# 1676461 updated by engine-setup. If an error occurs, engine-setup treats this is a failure and tries to rollback, which is a risky process. To work around this scenario, the package ovirt-engine-setup-plugin-ovirt-engine now requires ovirt-vmconsole 1.0.7-1. Updating the setup packages with yum should also update ovirt-vmconsole. If an error occurs, yum evaluates it as a non-fatal error. See also bug 1665197 for the actual error from ovirt-vmconsole. BZ# 1676822 Previously, while testing a RHEL 8 build of the virt-v2v daemon that turns a Red Hat Virtualization Host into a conversion host for CloudForms migration, you could not update the network profile of a running virtual machine guest. The current release fixes this issue. 
BZ# 1685517 This release allows an Ansible playbook to run on isolated, offline nodes. BZ# 1686537 In this release, VM migration is supported when both the origin and destination hosts have Pass-Through Host CPU enabled. BZ# 1688056 This fix includes a signed certificate for rhev-apt.exe until 2022-01-25. BZ# 1690446 This fix ensures the /etc/hosts directory label is correct for SELinux on the Red Hat Virtualization Manager virtual machine. BZ# 1690782 This fix ensures that installing the OVS-2.10 package restarts the OVS/OVN services after the package completes the install and update process. BZ# 1691173 This release ensures that during self-hosted engine deployments, downloading and installing the rhvm-appliance package does not occur if the corresponding OVA file is present. BZ# 1695038 Previously, upgrading from RHV 4.0 to 4.2 failed while using "ovirt-fast-forward-upgrade" tool due to 'eap7-jboss*' dependency issues. The current release includes a patch that fixes this bug. 4.1.2. Enhancements This release of Red Hat Virtualization features the following enhancements: BZ# 1009608 This release allows you to limit east-west traffic of VMs, to enable traffic only between the VM and a gateway. The new filter 'clean-traffic-gateway' has been added to libvirt. With a parameter called GATEWAY_MAC, a user can specify the MAC address of the gateway that is allowed to communicate with the VM and vice versa. Note that users can specify multiple GATEWAY_MACs. There are two possible configurations of VM: 1) A VM with a static IP. This is the recommended setup. It is also recommended to set the parameter CTRL_IP_LEARNING to 'none'. Any other value will result in a leak of initial traffic. This is caused by libvirt's learning mechanism (see https://libvirt.org/formatnwfilter.html#nwfelemsRulesAdvIPAddrDetection and https://bugzilla.redhat.com/show_bug.cgi?id=1647944 for more details). 2) A VM with DHCP. DHCP is working partially. It is not usable in production currently ( https://bugzilla.redhat.com/show_bug.cgi?id=1651499 ). The filter has a general issue with ARP leak ( https://bugzilla.redhat.com/show_bug.cgi?id=1651467 ). Peer VMs are able to see that the VM using this feature exists (in their arp table), but are not able to contact the VM, as the traffic from peers is still blocked by the filter. BZ# 1111783 In the current release, Windows clustering is supported for directly attached LUNs and shared disks. BZ# 1111784 The current release supports Windows clustering for directly attached LUNs and shared disks. BZ# 1155676 In this release, users can now export VM templates to OVA files located on shared storage, and import the OVA files from the shared storage into a different data center. BZ# 1209881 The iptables and iptables-service have been removed from the list of dependencies in self-hosted engine deployment. BZ# 1284775 The current release adds support for memory hot-plug for IBM POWER (ppc64le) virtual machines. BZ# 1286219 In the current release, the disk alias of a cloned virtual machine is Alias_<Cloned-Virtual-Machine-Name> . BZ# 1372134 The current release of the self-hosted engine supports deployment with static IPv6. BZ# 1388098 The current release provides a software hook for the Manager to disable restarting hosts following an outage. For example, this capability would help prevent thermal damage to hardware following an HVAC failure. BZ# 1408584 Previously the REST API did not include the CPU Type when it returned information about the host. 
Now, the CPU Type is included with the rest of the information concerning the host that the REST API returns, which is consistent with the Administration Portal. BZ# 1439733 In this release, VMs converted to oVirt (from VMware, Xen or OVA) now include RNG device and memory balloon device, provided that the guest OS has the necessary drivers installed. BZ# 1451297 TLSv1 and TLSv1.1 protocols are no longer secure, so they are forcefully disabled, and cannot be enabled, in the VDSM configuration. Only TLSv1.2 and higher versions of the protocol are enabled. The exact TLS version depends on the underlying OpenSSL version. BZ# 1454389 The current release of the Administration Portal supports search queries for virtual machines with a specific cluster compatibility override setting or with a different cluster compatibility override setting (or none): Vms: custom_compatibility_version = X.Y or != X.Y . BZ# 1454673 When renaming a running virtual machine, the new name is now applied immediately, even when the QEMU process is running and is set with the name. In this case, the user is provided with a warning that indicates that the running instance of the virtual machine uses the name. BZ# 1467332 Feature: Support default route role on IPv6-only networks, but only for IPv6 static interface configuration. Reason: oVirt engine should support IPv6 only networks for its existing capabilities. Result: - You can set the default route role on an IPv6-only network provided it has an IPv6 gateway. - For Red Hat Virtualization Manager to correctly report the sync status of the interfaces, configure all of the interfaces with static IPv6 addresses only. Also, configure the IPv6 gateway on the logical network that has the default route role. - IPv6 dynamic configuration is currently not supported. - The IPv6 gateway on the default route role network is applied as the default route for the v6 routing table on the host. - You can set an IPv6 gateway on a non-management network. This was previously possible only on the management network). - If more that one IPv6 gateway is set on the interfaces of a host, the Manager will be in an undefined state: There will be more than one default route entry in the v6 routing table on the host, which causes the host to report that there are no v6 gateways at all (meaning that the interfaces will appear as out of sync in the Manager.) BZ# 1510336 This release adds the ability to manage the MTU of VM networks in a centralized way, enabling oVirt to manage MTU all the way from the host network to the guest in the VM. This feature allows for the consistent use of MTUs in logical networks with small MTU (e.g., tunneled networks) and large MTU (e.g., jumbo frames) in VMs, even without DHCP. BZ# 1510856 Making large snapshots and other abnormal events can pause virtual machines, impacting their system time, and other functions, such as timestamps. The current release provides Guest Time Synchronization, which, after a snapshot is created and the virtual machine is un-paused, uses VDSM and the guest agent to synchronize the system time of the virtual machine with that of the host. The time_sync_snapshot_enable option enables synchronization for snapshots. The time_sync_cont_enable option enables synchronization for abnormal events that may pause virtual machines. By default, these features are disabled for backward compatibility. 
BZ# 1511234 The new boot_hostdev hook allows virtual machines to boot from passed through host devices such as NIC VF's, PCI-E SAS/RAID Cards, SCSI devices for example without requiring a normal bootable disk from a Red Hat Virtualization storage domain or direct LUN. BZ# 1511891 Previously, copying volumes to preallocated disks was slower than necessary and did not make optimal use of available network resources. In the current release, qemu-img uses out-of-order writing to improve the speed of write operations by up to six times. These operations include importing, moving, and copying large disks to preallocated storage. BZ# 1518697 Red Hat Virtualization Manager setup now uses oVirt Task Oriented Pluggable Installer/Implementation (otopi) to generate its answer files to eliminate the need for additional code or manual input on stated questions. BZ# 1526033 This release enables the export of a VM template to an Open Virtualization Appliance (OVA) file and the import of an OVA file as a VM template to facilitate VM template migration between data centers without using an export domain. BZ# 1527860 This release adds USB qemu-xhci controller support to SPICE consoles, for Q35 chipset support. Red Hat Virtualization now expects that when a BIOS type using the Q35 chipset is chosen, and USB is enabled, that the USB controller will be qemu-xhci. BZ# 1530031 The 'engine-backup' script now has default values for several options, so you do not need to supply values for these options. To see the default values, run 'engine-backup --help'. BZ# 1532969 Previously, virtual machines could only boot from BIOS. The current release adds support for booting virtual machines via UEFI firmware, a free, newer, more modern way to initialize a system. BZ# 1539829 This feature provides support for adding security groups and rules using the ovirt-provider-ovn package, as described by the OpenStack Networking API. BZ# 1542136 Feature: Auto persist changes on SetupNetworks Instruct VDSM to commit any changes applied during setup networks immediately upon successful completion of the setup networks process and if connectivity is successfully re-established with the Red Hat Virtualization Manager. If this flag is not specified in the request, it is assumed that it was set to false, which is backward compatible with the behavior. When setupNetworks is invoked from the Administration Portal, the default is 'true'. When it is invoked with a REST API call, the default is 'false'. When it is invoked from an ansible script, the default is 'true'. Reason: When the commit was not part of the setupNetworks request, the following commit request issued by the Manager upon successful re-establishment of the connection with VDSM would sometimes fail, leaving the configuration in a non-persisted state although the intention was to persist it. Result: The configuration is persisted immediately. BZ# 1553902 The current release of the User Interface Plugin API supports the updated Administration Portal design with the following changes: - Custom secondary menu items can be added to the vertical navigation menu. - Some functions have been renamed for consistency with the new Administration Portal design. A deprecation notice is displayed when the old names are used. - Some functions no longer support the alignRight parameter because the tabs are aligned horizontally, flowing from left to right. BZ# 1559694 If a VM does not use virtual NUMA nodes, it is better if its whole memory can fit into a single NUMA node on the host. 
Otherwise, there may be some performance overhead. There are two additions in this RFE: A new warning message is shown in the audit log if a VM is run on a host where its memory cannot fit into a single host NUMA node. A new policy unit is added to the scheduler: 'Fit VM to single host NUMA node'. When starting a VM, this policy prefers hosts where the VM can fit into a single NUMA node. This unit is not active by default, because it can cause undesired edge cases. For example, the policy unit would cause the following behavior when starting multiple VMs: In the following setup: 9 hosts with 16 GB per NUMA node 1 host with 4 GB per NUMA node When multiple VMs with 6 GB of memory are scheduled, the scheduling unit would prevent them from starting on the host with 4 GB per NUMA node, no matter how overloaded the other hosts are. It would use the last host only when all the others do not have enough free memory to run the VM. BZ# 1560132 In the Administration Portal, it is possible to set a threshold for cluster level monitoring as a percentage or an absolute value, for example, 95% or 2048 MB. When usage exceeds 95% or free memory falls below 2048 MB, a "high memory usage" or "low memory available" event is logged. This reduces log clutter for clusters with large (1.5 TB) amounts of memory. BZ# 1561033 The current release adds AMD SMT-awareness to VDSM and RHV-M. This change helps meet the constraints of schedulers and software that are licensed per-core. It also improves cache coherency for VMs by presenting a more accurate view of the CPU topology. As a result, SMT works as expected on AMD CPUs. BZ# 1561413 In the current release of the Red Hat Virtualization Manager, the "Remove" option is disabled if a virtual machine is delete-protected. BZ# 1561539 A new option, Activate Host After Install, has been added to the Administration Portal under Compute > Hosts, in the New Host or Edit Host screen. This option is selected by default. BZ# 1563271 An Ansible role, ovirt-host-deploy-spice-encryption , has been added to change the cypher string for SPICE consoles. The default cypher string satisfies FIPS requirements ('TLSv1.2+FIPS:kRSA+FIPS:!eNULL:!aNULL'). The role can be customized with the Ansible variable host_deploy_spice_cipher_string . BZ# 1570040 This release adds support for external OpenID Connect authentication using Keycloak in both the user interface and the REST API. BZ# 1570077 The current release of the User Interface Plugin API provides an "unload" handler that can be attached to a primary/secondary menu item or a details tab to perform clean-up when the user navigates away from these interface elements. BZ# 1571024 This feature provides the ability to enable live migration for HP VMs (and, in general, to all VM types with pinning settings). Previously, Red Hat Virtualization 4.2 added a new High-Performance VM profile type. This required configuration settings including pinning the VM to a host based on the host-specific configuration. Due to the pinning settings, the migration option for the HP VM type was automatically forced to be disabled. Now, Red Hat Virtualization 4.3 provides the ability for live migration of HP VMs (and all other VMs with a pinned configuration like NUMA pinning, CPU pinning, and CPU passthrough enabled). For more details, see the feature page: https://ovirt.org/develop/release-management/features/virt/high-performance-vm-migration.html BZ# 1571283 Previously, changing log levels required editing libvirt.conf and restarting the libvirtd service. 
This restart prevented support from collecting data and made reproducing issues more difficult. The current release adds the libvirt-admin package to the optional channel for Red Hat Virtualization Host. Installing this package enables you to run the virt-admin command to change libvirt logging levels on the fly. BZ# 1571371 High-performance virtual machines require pinning to multiple hosts to be highly-available. Previously virtual machines with NUMA pinning enabled could not be configured to run on more than one host. Now virtual machines with NUMA pinning enabled can be configured to run on one or more hosts. All hosts need to support NUMA pinning, and the NUMA pinning configuration needs to be compatible with all assigned hosts. BZ# 1571399 The current release of the User Interface Plugin API provides greater control over the placement of action buttons. BZ# 1574494 This update adds support for bare-metal machines based on IBM POWER9 CPUs running hypervisors on the RHEL-ALT host operating system. These hypervisors can run virtual machines with POWER8 or POWER9 virtual CPUs. This update also adds support for live migration of virtual machines with POWER8 virtual CPUs between hosts based on either POWER8 or POWER9 CPUs. BZ# 1578339 This release provides an Ansible role to ensure the correct shutdown of Red Hat Virtualization Manager or a Red Hat Hyperconverged Infrastructure environment. BZ# 1578775 The qemufwcfg driver has been added for the built-in firmware configuration (fw_cfg) system device on Windows 10 and Windows Server 2016 guests. As a result, fw_cfg devices are now identified correctly in the Device Manager on these guests. BZ# 1578782 The virtio-smbus driver installer for the built-in SMBus device on Windows 2008 guests has been added to the RHV Windows Guest Tools. As a result, SMBus devices are now identified correctly in the Device Manager on these guests. BZ# 1580346 In this release, the cluster property "set maintenance reason" is enabled by default. BZ# 1585008 The current release adds a new 'ssl_ciphers' option to VDSM, which enables you to configure available ciphers for encrypted connections (for example, between the Manager and VDSM, or between VDSM and VDSM). The values this option uses conform to the OpenSSL standard. For more information, see https://access.redhat.com/articles/4056301 BZ# 1588498 With this release, the size of the rhvm package has been reduced. BZ# 1590202 This release adds a feature to control toast notifications. Once any notifications are showing, "Dismiss" and "Do not disturb" buttons will appear that allow the user to silence notifications. BZ# 1592853 In this release, ovirt-log-collector now supports batch mode. BZ# 1597085 A new option has been added to the Administration Portal under Compute > Clusters in the Console configuration screen: Enable VNC Encryption BZ# 1598141 In this release, self-hosted engine installation supports Ansible playbooks that use tags. BZ# 1598318 The openscap, openscap-utils and scap-security-guide packages have been added to RHVH in order to increase security hardening in RHVH deployments. BZ# 1598391 Red Hat OpenStack Platform 14's OVN+neutron is now certified as an external network provider for Red Hat Virtualization 4.3. BZ# 1602968 Previously, "Power Off" was missing from the virtual machine context menu in the Administration Portal; although it was present in versions, it was removed as part of the new user interface in 4.2. 
Now, "Power Off" is once again present when a running virtual machine is right-clicked. BZ# 1609139 Previously, you could only assign one vGPU device type (mdev_type) to a virtual machine in the Administration Portal. The current release adds support for assigning multiple Nvidia vGPU device types to a single virtual machine. BZ# 1611889 This feature allows the user to select the cloud-init protocol with which to create a virtual machine's network configuration. The protocol can be selected while creating or editing a VM, or while starting a VM with Run Once. In older versions of cloud-init, backward compatibility needed to be maintained with the ENI protocol, whereas on newer cloud-init versions the OpenStack-Metadata protocol is supported. BZ# 1615348 In this release, an Ansible playbook enables you to deploy the Metrics Store on a single node or on multiple nodes and to scale out an existing deployment. BZ# 1615974 The current release replaces Fluentd with Rsyslog, which can collect oVirt logs, engine.log, VDSM logs, and collectd metrics. Systems upgraded from 4.2 will still have Fluentd installed, but it will be disabled and stopped. After upgrading to 4.3, you can remove the Fluentd packages. Fluentd will not be supported in RHEL 8. Rsyslog offers better performance. Rsyslog can output to Elasticsearch on Red Hat OpenShift Container Platform. Sending data to your own instance of Elasticsearch is not currently supported. Collectd is reconfigured to use write_syslog, a new plugin, to send metrics to Rsyslog. When deploying ovirt metrics, Rsyslog is configured on the Red Hat Virtualization Manager and host to collect and ship the data to the requested target. BZ# 1616415 Virtual machines can be forcibly shut down in the VM Portal. BZ# 1619210 In the past, high-performance virtual machines were pinned to specific hosts and did not support live migration. The current release enables live migration of high-performance virtual machines, as well as virtual machines with NUMA pinning, CPU pinning, or CPU passthrough enabled. BZ# 1619391 In the current release, invoking the ovirt-aaa-jdbc-tool logs the following three events to the syslog server: the user who invokes the ovirt-aaa-jdbc-tool; the parameters passed to ovirt-aaa-jdbc-tool except filter passwords; and whether invoking ovirt-aaa-jdbc-tool was successful. BZ# 1620569 Qemu Guest Agent packages for several Linux distributions have been added to make it easier to install the guest agent offline. BZ# 1620594 In this release, virt-v2v attempts to install the QEMU Guest Agent on Linux guests during VM conversion. For this feature to work properly, a current RHV guest tools ISO must be attached during the conversion. BZ# 1625543 When Importing KVM VMs and Sparseness is specified, the actual Disk Size should be preserved to improve the performance of the Import as well as to conserve disk space on the Destination Storage Domain. Previously, when you set thin provisioning for importing a KVM-based VM into a Red Hat Virtualization environment, the disk size of the VM within the Red Hat Virtualization storage domain was inflated to the volume size or larger, even when the original KVM-based VM was much smaller. KVM Sparseness is now supported so that when you import a virtual machine with thin provisioning enabled into a Red Hat Virtualization environment, the disk size of the original virtual machine image is preserved. However, KVM Sparseness is not supported for Block Storage Domains. 
BZ# 1625612 This release adds support for importing VMware virtual machines that include snapshots. BZ# 1629437 As part of replacing Fluentd with Rsyslog, the RHEL Ansible role logging , from the linux-system-roles collection of roles, is responsible for deploying Rsyslog configuration files and service handling for multiple projects. This role is maintained by RHEL and makes Rsyslog deployment easier and more maintainable. In this release, the Rsyslog service and configuration are deployed on the oVirt engine and hosts using this role when you deploy oVirt metrics. BZ# 1630243 During virtual machine live migration, the migration progress bar is now also shown in the host's Virtual Machine tab. BZ# 1631587 In this release, the Correlation-Id can be passed to the vdsm-client by using the '--flow-id' argument with the vdsm-client tool. BZ# 1636256 In versions, it was not possible to limit the number of simultaneous sessions for each user, so active sessions could significantly grow up until they expired. Now, Red Hat Virtualization Manager 4.3 introduces the ENGINE_MAX_USER_SESSIONS option, which can limit simultaneous sessions per user. The default value is -1 and allows unlimited sessions per user. To limit the number of simultaneous sessions per user, create the 99-limit-user-sessions.conf file in /etc/ovirt-engine/engine.conf.d and add ENGINE_MAX_USER_SESSIONS=NNN, where NNN is the maximum number of allowed simultaneous sessions per user. Save and restart using: systemctl restart ovirt-engine. BZ# 1637015 With this release, users can now disable pop-up notifications. When a pop-up notification appears in the Administration Portal, the following options are now available for disabling notifications: - Dismiss All - Do Not Disturb - for 10 minutes - for 1 hour - for 1 day - until Log In BZ# 1641125 Previously, version 4.2.0 added support for vGPUs and used a Consolidated ("depth-first") allocation policy. The current release adds support for a Separated ("breadth-first") allocation policy. The default policy is the Consolidated allocation policy. BZ# 1644693 Previously, for virtual machines with a Windows 10 guest, the host CPU load was too high. The current release reduces the CPU load by adding enlightenments that enable the hypervisor synthetic interrupt controller (SynIC) and stimer. For example, with this enhancement, the host CPU load of a virtual machine running an idle Windows 10 guest should be approximately 0-5%. BZ# 1651225 Red Hat Enterprise Linux 8 is fully supported as a guest operating system. Note that GNOME single sign-on functionality, guest application list, and guest-side hooks are not supported. BZ# 1651255 You can now set the number of IO threads in the new/edit VM dialog in the Administration Portal, instead of just the REST API. BZ# 1654253 The current release presents the OpenSCAP security profile as an option to users installing and upgrading Red Hat Virtualization Hosts. This feature helps organizations comply with the Security Content Automation Protocol (SCAP) standards. BZ# 1656794 This release disables the "Remove" button on the Everyone permissions page to prevent misconfiguring Red Hat Virtualization Manager permissions. BZ# 1661921 The release ensures the Red Hat Virtualization internal OVN database connections and OpenStack REST APIs use TLS 1.2 and HIGH ciphers to address configurable OVN internal connections and the default Red Hat Enterprise Linux version 7 OpenSSL configuration allowing insecure ciphers. 
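As a short worked example of the session limit described in BZ# 1636256 above (the limit of 150 is an arbitrary illustration, not a recommended value), the drop-in file and restart look like this:
# /etc/ovirt-engine/engine.conf.d/99-limit-user-sessions.conf
# Allow at most 150 simultaneous sessions per user:
ENGINE_MAX_USER_SESSIONS=150
# Apply the change by restarting the Manager service:
systemctl restart ovirt-engine
Removing the file (or setting the value back to -1) and restarting ovirt-engine restores the default behavior of unlimited sessions per user.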
BZ# 1663288 The current release updates the QEMU post-copy migration policy from a Technology Preview to a Supported Feature. As a cautionary note, a network failure during migration results in a virtual machine in an inconsistent state, which cannot be recovered by the Manager. Administrators using this feature should be aware of the potential for data loss. BZ# 1664490 This release enhancement preserves a virtual machine's time zone setting of a virtual machine when moving the virtual machine from one cluster to a different cluster. BZ# 1665072 In this release, the write_syslog collectd plugin is now automatically installed on the system running the ovirt-engine service to provide metrics store support. BZ# 1665073 In this release, the write_syslog collectd plugin is now automatically installed on managed hosts for metrics store support. BZ# 1667842 Previously, the background process to migrate virtual machines considered affinity groups. This release updates the background process to migrate virtual machines to consider both affinity groups and affinity labels. BZ# 1669047 In order to create and use a Managed block storage domain, a new database must be created that is accessible by cinderlib. In this release, a new database can be created during the engine-setup process, using the same procedures described in the documentation for "Configuring the Red Hat Virtualization Manager". BZ# 1671074 In this release, the available SSL ciphers used in communication between the Red Hat Virtualization Manager and VDSM have been limited, and now exclude weak or anonymous ciphers. BZ# 1673303 In this release, the IPv6 default route of a host is managed by restricting the IPv6 default gateways so that there is only one such gateway for all host interfaces. Note that: 1. When the default route role is moved away from a network, its IPv6 gateway is automatically removed from the corresponding interface. 2. After moving the default route role to a new network, you should set a static IPv6 gateway on this network. 3. If the host and Red Hat Virtualization Manager are not on the same subnet, the Manager will lose connectivity with the host on moving the default route role between networks (see note 1). You should take precautions to avoid this scenario. BZ# 1679133 The current release ships a new version of Red Hat Gluster Storage, RHGS 3.4.4, in Red Hat Virtualization Host (RHVH). BZ# 1693279 This enhancement installs the v2v-conversion-host-wrapper RPM by default on Red Hat Virtualization Host. 4.1.3. Technology Preview The items listed in this section are provided as Technology Previews. For further information on the scope of Technology Preview status, and the associated support implications, refer to Technology Preview Features Support Scope . BZ# 1636749 This technology preview includes the flexvolume-driver and volume-provisioner component to enable dynamic storage provisioning for OpenShift Container Platform deployed on Red Hat Virtualization virtual machines. The container can use any of the existing storage technologies Red Hat Virtualization supports. 4.1.4. Rebase: Bug Fixes Only The items listed in this section are bugs that were originally resolved in the community version and included in this release. BZ# 1625591 Previously, after importing and removing a Kernel-based Virtual Machine (KVM), trying to re-import the same virtual machine failed with a "Job ID already exists" error. The current release deletes completed import jobs from the VDSM. 
You can re-import a virtual machine without encountering the same error. 4.1.5. Release Notes This section outlines important details about the release, including recommended practices and notable changes to Red Hat Virtualization. You must take this information into account to ensure the best possible outcomes for your deployment. BZ# 1304300 Large guest operating systems have a significant overhead on the host. The host requires a consecutive non-swapped block of memory that is 1/128th of the virtual machine's memory size. Previously, this overhead was not accounted for when scheduling the virtual machine. If the memory requirement was not satisfied, the virtual machine failed to start with an error message similar to this one: "libvirtError: internal error: process exited while connecting to monitor: ... qemu-kvm: Failed to allocate HTAB of requested size, try with smaller maxmem" The current release fixes this issue by using dynamic hash page table resizing. BZ# 1403674 This release allows Red Hat Virtualization Manager to set a display network and open a console to a virtual machine over an IPv6 only network. BZ# 1511697 Previously, an administrator with the ClusterAdmin role was able to modify the self-hosted engine virtual machine, which could cause damage. In the current release, only a SuperUser can modify a self-hosted engine and its storage domain. BZ# 1514004 The TLSv1 and TLSv1.1 protocols are no longer secure. In the current release, they have been forcefully disabled in the VDSM configuration and cannot be enabled. Only TLSv1.2 and higher versions of the protocol are enabled. The exact version enabled depends on the underlying OpenSSL version. BZ# 1550634 This release removes the Red Hat Virtualization Manager support for clusters levels 3.6 and 4.0. Customers must upgrade their data centers to Red Hat Virtualization Manager 4.1 or later before upgrading to Red Hat Virtualization Manager 4.3. BZ# 1579819 This release updates the command sequence for Preparing Local Storage for Red Hat Virtualization Hosts by adding a command to mount the logical volume. BZ# 1597705 Previously, in the VM Portal, users who did not have permissions to create virtual machines could see the Create VM button. The current release fixes this issue by fetching user permissions and then using them to show or hide the Create VM button. BZ# 1599321 There are inconsistencies in the following internal configuration options: - HotPlugCpuSupported - HotUnplugCpuSupported - HotPlugMemorySupported - HotUnplugMemorySupported - IsMigrationSupported - IsMemorySnapshotSupported - IsSuspendSupported - ClusterRequiredRngSourcesDefault Systems that have upgraded from RHV 4.0 to RHV 4.1/4.2 and are experiencing problems with these features should upgrade to RHV 4.2.5 or later. BZ# 1599617 Previously, the VM Portal displayed all clusters, regardless of user permissions. The current release fixes this issue by fetching user permissions and displaying only those clusters which the user has permissions to use. BZ# 1609884 In this release, the oVirt release package for master, ovirt-release-master, enables a new repository hosted on the Cool Other Package Repositories (COPR) service for delivering ovirt-web-ui packages. BZ# 1627753 The current release replaces Fluentd with Rsyslog for collecting oVirt logs and collectd metrics. Hosts upgraded from 4.2 will still have Fluentd installed, but the service is disabled and stopped. After upgrading to 4.3, you can remove the Fluentd packages. 
BZ# 1627756 The current release replaces Fluentd with Rsyslog for collecting oVirt logs and collectd metrics. Systems upgraded from 4.2 will still have Fluentd installed, but it will be disabled and stopped. After upgrading to 4.3, you can remove the Fluentd packages. BZ# 1651140 Red Hat Virtualization Manager now requires JBoss Enterprise Application Platform. BZ# 1653291 Context-sensitive help has been removed from Red Hat Virtualization (RHV) 4.3. RHV user interfaces no longer include small question mark icons for displaying context-sensitive help information. To access the RHV documentation, use the RHV welcome page and the Red Hat Documentation tab. BZ# 1655115 The current release removes the VDSM daemon's support for cluster levels 3.6/4.0 and Red Hat Virtualization Manager 3.6/4.0. This means that VDSM from RHV 4.3 cannot be used with the Manager from RHV 3.6/4.0. To use the new version of VDSM, upgrade the Manager to version 4.1 or later. BZ# 1671635 oVirt now requires WildFly version 15.0.1 or later. BZ# 1697297 Previously, Python-openvswitch used a compiled C extension wrapper within the library for speedier JSON processing. The memory object used to store the JSON response was not freed and was leaked. The current release fixes this issue by deallocating the memory stored for the JSON parser so that the memory is recovered. 4.1.6. Known Issues These known issues exist in Red Hat Virtualization at this time: BZ# 1073434 When the ISO Uploader uploads ISO image files, it sets the file permissions incorrectly to -rw-r-----. Because the permissions for "other" are none, the ISO files are not visible in the Administration Portal. Although the ISO Uploader has been deprecated, it is still available. To work around the permissions issue, set the ISO file permissions to -rw-r--r-- by entering: chmod 644 filename.iso Verify that the system is configured as described in the "Preparing and Adding NFS Storage" section of the Administration Guide for Red Hat Virtualization. The above recommendations may also apply if you encounter permissions/visibility issues while using the following alternatives to the ISO Uploader: * Manually copying an ISO file to the ISO storage domain, as described in https://access.redhat.com/solutions/46518/ . * In version 4.2 of Red Hat Virtualization onward, uploading virtual disk images and ISO images to the data storage domain using the Administration Portal or REST API. BZ# 1146115 If the same iSCSI target is used to create two or more storage domains, the iSCSI session is not logged out, even if the storage domain is put into maintenance mode. Red Hat recommends using different iSCSI targets to create different storage domains. To work around this issue, restart the hypervisor host. BZ# 1543411 In the current release, Q35 machines cannot support more than 500 devices. BZ# 1636254 VDSM uses lldpad. Due to a bug, lldpad confuses NetXtreme II BCM57810 FCoE-enabled cards. When the VDSM configuration enables lldpad to read lldp data from the card, it renders the card unusable. To work around this issue, set enable_lldp=false in vdsm.conf.d and restart VDSM. Check that lldpad is disabled on all relevant interfaces by entering the command "lldptool get-lldp -i USDifname adminStatus". If lldp is enabled, disable it by entering "lldptool set-lldp -i USDifname adminStatus=disabled". After ensuring that lldp support is disabled in VDSM, networking should be unaffected. 4.1.7.
Deprecated Functionality The items in this section are either no longer supported or will no longer be supported in a future release. BZ# 1381223 With this update, ovirt-image-uploader has been retired. In Red Hat Virtualization 4.0, ovirt-image-uploader was deprecated in favor of ovirt-imageio. BZ# 1399709 The ovirt-shell tool has been deprecated since RHV 4.0 and has not been updated since. It is included in RHV 4.3 and later, in order not to break existing scripts, but the tool is now unsupported. BZ# 1399750 Version 3 of the REST API has been deprecated as of RHV version 4.0 and is not supported from RHV version 4.3, along with the ovirt-shell and version 3 of the Python SDK Guide, Ruby SDK Guide, and Java SDK Guide. BZ# 1533086 The "Scan Alignment" feature in previous versions of the Administration Portal is only relevant to guest operating systems that are outdated and unsupported. The current release removes the "Scan Alignment" feature, along with the historical records of disks being aligned or misaligned. BZ# 1540921 The Conroe and Penryn CPU types are no longer supported. They do not appear as options for Compatibility Version 4.3, and a warning is displayed for older versions. BZ# 1627636 The ovirt-engine-cli package uses the version 3 REST API, which is deprecated and unsupported. With this update, ovirt-engine-cli is no longer a dependency and is not installed by default.
Chapter 2. Managing compute machines with the Machine API 2.1. Creating a compute machine set on Alibaba Cloud You can create a different compute machine set to serve a specific purpose in your OpenShift Container Platform cluster on Alibaba Cloud. For example, you might create infrastructure machine sets and related machines so that you can move supporting workloads to the new machines. Important You can use the advanced machine management and scaling capabilities only in clusters where the Machine API is operational. Clusters with user-provisioned infrastructure require additional validation and configuration to use the Machine API. Clusters with the infrastructure platform type none cannot use the Machine API. This limitation applies even if the compute machines that are attached to the cluster are installed on a platform that supports the feature. This parameter cannot be changed after installation. To view the platform type for your cluster, run the following command: USD oc get infrastructure cluster -o jsonpath='{.status.platform}' 2.1.1. Sample YAML for a compute machine set custom resource on Alibaba Cloud This sample YAML defines a compute machine set that runs in a specified Alibaba Cloud zone in a region and creates nodes that are labeled with node-role.kubernetes.io/<role>: "" . In this sample, <infrastructure_id> is the infrastructure ID label that is based on the cluster ID that you set when you provisioned the cluster, and <role> is the node label to add. apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 machine.openshift.io/cluster-api-machine-role: <role> 2 machine.openshift.io/cluster-api-machine-type: <role> 3 name: <infrastructure_id>-<role>-<zone> 4 namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 5 machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<zone> 6 template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 7 machine.openshift.io/cluster-api-machine-role: <role> 8 machine.openshift.io/cluster-api-machine-type: <role> 9 machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<zone> 10 spec: metadata: labels: node-role.kubernetes.io/<role>: "" providerSpec: value: apiVersion: machine.openshift.io/v1 credentialsSecret: name: alibabacloud-credentials imageId: <image_id> 11 instanceType: <instance_type> 12 kind: AlibabaCloudMachineProviderConfig ramRoleName: <infrastructure_id>-role-worker 13 regionId: <region> 14 resourceGroup: 15 id: <resource_group_id> type: ID securityGroups: - tags: 16 - Key: Name Value: <infrastructure_id>-sg-<role> type: Tags systemDisk: 17 category: cloud_essd size: <disk_size> tag: 18 - Key: kubernetes.io/cluster/<infrastructure_id> Value: owned userDataSecret: name: <user_data_secret> 19 vSwitch: tags: 20 - Key: Name Value: <infrastructure_id>-vswitch-<zone> type: Tags vpcId: "" zoneId: <zone> 21 1 5 7 Specify the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. If you have the OpenShift CLI ( oc ) installed, you can obtain the infrastructure ID by running the following command: USD oc get -o jsonpath='{.status.infrastructureName}{"\n"}' infrastructure cluster 2 3 8 9 Specify the node label to add. 4 6 10 Specify the infrastructure ID, node label, and zone. 11 Specify the image to use. 
Use an image from an existing default compute machine set for the cluster. 12 Specify the instance type you want to use for the compute machine set. 13 Specify the name of the RAM role to use for the compute machine set. Use the value that the installer populates in the default compute machine set. 14 Specify the region to place machines on. 15 Specify the resource group and type for the cluster. You can use the value that the installer populates in the default compute machine set, or specify a different one. 16 18 20 Specify the tags to use for the compute machine set. Minimally, you must include the tags shown in this example, with appropriate values for your cluster. You can include additional tags, including the tags that the installer populates in the default compute machine set it creates, as needed. 17 Specify the type and size of the root disk. Use the category value that the installer populates in the default compute machine set it creates. If required, specify a different value in gigabytes for size . 19 Specify the name of the secret in the user data YAML file that is in the openshift-machine-api namespace. Use the value that the installer populates in the default compute machine set. 21 Specify the zone within your region to place machines on. Be sure that your region supports the zone that you specify. 2.1.1.1. Machine set parameters for Alibaba Cloud usage statistics The default compute machine sets that the installer creates for Alibaba Cloud clusters include nonessential tag values that Alibaba Cloud uses internally to track usage statistics. These tags are populated in the securityGroups , tag , and vSwitch parameters of the spec.template.spec.providerSpec.value list. When creating compute machine sets to deploy additional machines, you must include the required Kubernetes tags. The usage statistics tags are applied by default, even if they are not specified in the compute machine sets you create. You can also include additional tags as needed. The following YAML snippets indicate which tags in the default compute machine sets are optional and which are required. Tags in spec.template.spec.providerSpec.value.securityGroups spec: template: spec: providerSpec: value: securityGroups: - tags: - Key: kubernetes.io/cluster/<infrastructure_id> 1 Value: owned - Key: GISV Value: ocp - Key: sigs.k8s.io/cloud-provider-alibaba/origin 2 Value: ocp - Key: Name Value: <infrastructure_id>-sg-<role> 3 type: Tags 1 2 Optional: This tag is applied even when not specified in the compute machine set. 3 Required. where: <infrastructure_id> is the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. <role> is the node label to add. Tags in spec.template.spec.providerSpec.value.tag spec: template: spec: providerSpec: value: tag: - Key: kubernetes.io/cluster/<infrastructure_id> 1 Value: owned - Key: GISV 2 Value: ocp - Key: sigs.k8s.io/cloud-provider-alibaba/origin 3 Value: ocp 2 3 Optional: This tag is applied even when not specified in the compute machine set. 1 Required. where <infrastructure_id> is the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. 
Tags in spec.template.spec.providerSpec.value.vSwitch spec: template: spec: providerSpec: value: vSwitch: tags: - Key: kubernetes.io/cluster/<infrastructure_id> 1 Value: owned - Key: GISV 2 Value: ocp - Key: sigs.k8s.io/cloud-provider-alibaba/origin 3 Value: ocp - Key: Name Value: <infrastructure_id>-vswitch-<zone> 4 type: Tags 1 2 3 Optional: This tag is applied even when not specified in the compute machine set. 4 Required. where: <infrastructure_id> is the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. <zone> is the zone within your region to place machines on. 2.1.2. Creating a compute machine set In addition to the compute machine sets created by the installation program, you can create your own to dynamically manage the machine compute resources for specific workloads of your choice. Prerequisites Deploy an OpenShift Container Platform cluster. Install the OpenShift CLI ( oc ). Log in to oc as a user with cluster-admin permission. Procedure Create a new YAML file that contains the compute machine set custom resource (CR) sample and is named <file_name>.yaml . Ensure that you set the <clusterID> and <role> parameter values. Optional: If you are not sure which value to set for a specific field, you can check an existing compute machine set from your cluster. To list the compute machine sets in your cluster, run the following command: USD oc get machinesets -n openshift-machine-api Example output NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m To view values of a specific compute machine set custom resource (CR), run the following command: USD oc get machineset <machineset_name> \ -n openshift-machine-api -o yaml Example output apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 name: <infrastructure_id>-<role> 2 namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machine-role: <role> machine.openshift.io/cluster-api-machine-type: <role> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> spec: providerSpec: 3 ... 1 The cluster infrastructure ID. 2 A default node label. Note For clusters that have user-provisioned infrastructure, a compute machine set can only create worker and infra type machines. 3 The values in the <providerSpec> section of the compute machine set CR are platform-specific. For more information about <providerSpec> parameters in the CR, see the sample compute machine set CR configuration for your provider. 
Create a MachineSet CR by running the following command: USD oc create -f <file_name>.yaml Verification View the list of compute machine sets by running the following command: USD oc get machineset -n openshift-machine-api Example output NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-infra-us-east-1a 1 1 1 1 11m agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m When the new compute machine set is available, the DESIRED and CURRENT values match. If the compute machine set is not available, wait a few minutes and run the command again. 2.2. Creating a compute machine set on AWS You can create a different compute machine set to serve a specific purpose in your OpenShift Container Platform cluster on Amazon Web Services (AWS). For example, you might create infrastructure machine sets and related machines so that you can move supporting workloads to the new machines. Important You can use the advanced machine management and scaling capabilities only in clusters where the Machine API is operational. Clusters with user-provisioned infrastructure require additional validation and configuration to use the Machine API. Clusters with the infrastructure platform type none cannot use the Machine API. This limitation applies even if the compute machines that are attached to the cluster are installed on a platform that supports the feature. This parameter cannot be changed after installation. To view the platform type for your cluster, run the following command: USD oc get infrastructure cluster -o jsonpath='{.status.platform}' 2.2.1. Sample YAML for a compute machine set custom resource on AWS This sample YAML defines a compute machine set that runs in the us-east-1a Amazon Web Services (AWS) zone and creates nodes that are labeled with node-role.kubernetes.io/<role>: "" . In this sample, <infrastructure_id> is the infrastructure ID label that is based on the cluster ID that you set when you provisioned the cluster, and <role> is the node label to add. 
apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 name: <infrastructure_id>-<role>-<zone> 2 namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 3 machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<zone> 4 template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 5 machine.openshift.io/cluster-api-machine-role: <role> 6 machine.openshift.io/cluster-api-machine-type: <role> 7 machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<zone> 8 spec: metadata: labels: node-role.kubernetes.io/<role>: "" 9 providerSpec: value: ami: id: ami-046fe691f52a953f9 10 apiVersion: awsproviderconfig.openshift.io/v1beta1 blockDevices: - ebs: iops: 0 volumeSize: 120 volumeType: gp2 credentialsSecret: name: aws-cloud-credentials deviceIndex: 0 iamInstanceProfile: id: <infrastructure_id>-worker-profile 11 instanceType: m6i.large kind: AWSMachineProviderConfig placement: availabilityZone: <zone> 12 region: <region> 13 securityGroups: - filters: - name: tag:Name values: - <infrastructure_id>-worker-sg 14 subnet: filters: - name: tag:Name values: - <infrastructure_id>-private-<zone> 15 tags: - name: kubernetes.io/cluster/<infrastructure_id> 16 value: owned - name: <custom_tag_name> 17 value: <custom_tag_value> 18 userDataSecret: name: worker-user-data 1 3 5 11 14 16 Specify the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. If you have the OpenShift CLI installed, you can obtain the infrastructure ID by running the following command: USD oc get -o jsonpath='{.status.infrastructureName}{"\n"}' infrastructure cluster 2 4 8 Specify the infrastructure ID, role node label, and zone. 6 7 9 Specify the role node label to add. 10 Specify a valid Red Hat Enterprise Linux CoreOS (RHCOS) Amazon Machine Image (AMI) for your AWS zone for your OpenShift Container Platform nodes. If you want to use an AWS Marketplace image, you must complete the OpenShift Container Platform subscription from the AWS Marketplace to obtain an AMI ID for your region. USD oc -n openshift-machine-api \ -o jsonpath='{.spec.template.spec.providerSpec.value.ami.id}{"\n"}' \ get machineset/<infrastructure_id>-<role>-<zone> 17 18 Optional: Specify custom tag data for your cluster. For example, you might add an admin contact email address by specifying a name:value pair of Email:[email protected] . Note Custom tags can also be specified during installation in the install-config.yml file. If the install-config.yml file and the machine set include a tag with the same name data, the value for the tag from the machine set takes priority over the value for the tag in the install-config.yml file. 12 Specify the zone, for example, us-east-1a . 13 Specify the region, for example, us-east-1 . 15 Specify the infrastructure ID and zone. 2.2.2. Creating a compute machine set In addition to the compute machine sets created by the installation program, you can create your own to dynamically manage the machine compute resources for specific workloads of your choice. Prerequisites Deploy an OpenShift Container Platform cluster. Install the OpenShift CLI ( oc ). Log in to oc as a user with cluster-admin permission. Procedure Create a new YAML file that contains the compute machine set custom resource (CR) sample and is named <file_name>.yaml . 
Ensure that you set the <clusterID> and <role> parameter values. Optional: If you are not sure which value to set for a specific field, you can check an existing compute machine set from your cluster. To list the compute machine sets in your cluster, run the following command: USD oc get machinesets -n openshift-machine-api Example output NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m To view values of a specific compute machine set custom resource (CR), run the following command: USD oc get machineset <machineset_name> \ -n openshift-machine-api -o yaml Example output apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 name: <infrastructure_id>-<role> 2 namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machine-role: <role> machine.openshift.io/cluster-api-machine-type: <role> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> spec: providerSpec: 3 ... 1 The cluster infrastructure ID. 2 A default node label. Note For clusters that have user-provisioned infrastructure, a compute machine set can only create worker and infra type machines. 3 The values in the <providerSpec> section of the compute machine set CR are platform-specific. For more information about <providerSpec> parameters in the CR, see the sample compute machine set CR configuration for your provider. Create a MachineSet CR by running the following command: USD oc create -f <file_name>.yaml If you need compute machine sets in other availability zones, repeat this process to create more compute machine sets. Verification View the list of compute machine sets by running the following command: USD oc get machineset -n openshift-machine-api Example output NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-infra-us-east-1a 1 1 1 1 11m agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m When the new compute machine set is available, the DESIRED and CURRENT values match. If the compute machine set is not available, wait a few minutes and run the command again. 2.2.3. Labeling GPU machine sets for the cluster autoscaler You can use a machine set label to indicate which machines the cluster autoscaler can use to deploy GPU-enabled nodes. Prerequisites Your cluster uses a cluster autoscaler. Procedure On the machine set that you want to create machines for the cluster autoscaler to use to deploy GPU-enabled nodes, add a cluster-api/accelerator label: apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: name: machine-set-name spec: template: spec: metadata: labels: cluster-api/accelerator: nvidia-t4 1 1 Specify a label of your choice that consists of alphanumeric characters, - , _ , or . and starts and ends with an alphanumeric character. 
For example, you might use nvidia-t4 to represent Nvidia T4 GPUs, or nvidia-a10g for A10G GPUs. Note You must specify the value of this label for the spec.resourceLimits.gpus.type parameter in your ClusterAutoscaler CR. For more information, see "Cluster autoscaler resource definition". Additional resources Cluster autoscaler resource definition 2.2.4. Machine set options for the Amazon EC2 Instance Metadata Service You can use machine sets to create machines that use a specific version of the Amazon EC2 Instance Metadata Service (IMDS). Machine sets can create machines that allow the use of both IMDSv1 and IMDSv2 or machines that require the use of IMDSv2. Note Using IMDSv2 is only supported on AWS clusters that were created with OpenShift Container Platform version 4.7 or later. To deploy new compute machines with your preferred IMDS configuration, create a compute machine set YAML file with the appropriate values. You can also edit an existing machine set to create new machines with your preferred IMDS configuration when the machine set is scaled up. Important Before configuring a machine set to create machines that require IMDSv2, ensure that any workloads that interact with the AWS metadata service support IMDSv2. 2.2.4.1. Configuring IMDS by using machine sets You can specify whether to require the use of IMDSv2 by adding or editing the value of metadataServiceOptions.authentication in the machine set YAML file for your machines. Prerequisites To use IMDSv2, your AWS cluster must have been created with OpenShift Container Platform version 4.7 or later. Procedure Add or edit the following lines under the providerSpec field: providerSpec: value: metadataServiceOptions: authentication: Required 1 1 To require IMDSv2, set the parameter value to Required . To allow the use of both IMDSv1 and IMDSv2, set the parameter value to Optional . If no value is specified, both IMDSv1 and IMDSv2 are allowed. 2.2.5. Machine sets that deploy machines as Dedicated Instances You can create a machine set running on AWS that deploys machines as Dedicated Instances. Dedicated Instances run in a virtual private cloud (VPC) on hardware that is dedicated to a single customer. These Amazon EC2 instances are physically isolated at the host hardware level. The isolation of Dedicated Instances occurs even if the instances belong to different AWS accounts that are linked to a single payer account. However, other instances that are not dedicated can share hardware with Dedicated Instances if they belong to the same AWS account. Instances with either public or dedicated tenancy are supported by the Machine API. Instances with public tenancy run on shared hardware. Public tenancy is the default tenancy. Instances with dedicated tenancy run on single-tenant hardware. 2.2.5.1. Creating Dedicated Instances by using machine sets You can run a machine that is backed by a Dedicated Instance by using Machine API integration. Set the tenancy field in your machine set YAML file to launch a Dedicated Instance on AWS. Procedure Specify a dedicated tenancy under the providerSpec field: providerSpec: placement: tenancy: dedicated 2.2.6. Machine sets that deploy machines as Spot Instances You can save on costs by creating a compute machine set running on AWS that deploys machines as non-guaranteed Spot Instances. Spot Instances utilize unused AWS EC2 capacity and are less expensive than On-Demand Instances. 
You can use Spot Instances for workloads that can tolerate interruptions, such as batch or stateless, horizontally scalable workloads. AWS EC2 can terminate a Spot Instance at any time. AWS gives a two-minute warning to the user when an interruption occurs. OpenShift Container Platform begins to remove the workloads from the affected instances when AWS issues the termination warning. Interruptions can occur when using Spot Instances for the following reasons: The instance price exceeds your maximum price The demand for Spot Instances increases The supply of Spot Instances decreases When AWS terminates an instance, a termination handler running on the Spot Instance node deletes the machine resource. To satisfy the compute machine set replicas quantity, the compute machine set creates a machine that requests a Spot Instance. 2.2.6.1. Creating Spot Instances by using compute machine sets You can launch a Spot Instance on AWS by adding spotMarketOptions to your compute machine set YAML file. Procedure Add the following line under the providerSpec field: providerSpec: value: spotMarketOptions: {} You can optionally set the spotMarketOptions.maxPrice field to limit the cost of the Spot Instance. For example you can set maxPrice: '2.50' . If the maxPrice is set, this value is used as the hourly maximum spot price. If it is not set, the maximum price defaults to charge up to the On-Demand Instance price. Note It is strongly recommended to use the default On-Demand price as the maxPrice value and to not set the maximum price for Spot Instances. 2.2.7. Adding a GPU node to an existing OpenShift Container Platform cluster You can copy and modify a default compute machine set configuration to create a GPU-enabled machine set and machines for the AWS EC2 cloud provider. For more information about the supported instance types, see the following NVIDIA documentation: NVIDIA GPU Operator Community support matrix NVIDIA AI Enterprise support matrix Procedure View the existing nodes, machines, and machine sets by running the following command. Note that each node is an instance of a machine definition with a specific AWS region and OpenShift Container Platform role. USD oc get nodes Example output NAME STATUS ROLES AGE VERSION ip-10-0-52-50.us-east-2.compute.internal Ready worker 3d17h v1.26.0 ip-10-0-58-24.us-east-2.compute.internal Ready control-plane,master 3d17h v1.26.0 ip-10-0-68-148.us-east-2.compute.internal Ready worker 3d17h v1.26.0 ip-10-0-68-68.us-east-2.compute.internal Ready control-plane,master 3d17h v1.26.0 ip-10-0-72-170.us-east-2.compute.internal Ready control-plane,master 3d17h v1.26.0 ip-10-0-74-50.us-east-2.compute.internal Ready worker 3d17h v1.26.0 View the machines and machine sets that exist in the openshift-machine-api namespace by running the following command. Each compute machine set is associated with a different availability zone within the AWS region. The installer automatically load balances compute machines across availability zones. USD oc get machinesets -n openshift-machine-api Example output NAME DESIRED CURRENT READY AVAILABLE AGE preserve-dsoc12r4-ktjfc-worker-us-east-2a 1 1 1 1 3d11h preserve-dsoc12r4-ktjfc-worker-us-east-2b 2 2 2 2 3d11h View the machines that exist in the openshift-machine-api namespace by running the following command. At this time, there is only one compute machine per machine set, though a compute machine set could be scaled to add a node in a particular region and zone. 
USD oc get machines -n openshift-machine-api | grep worker Example output preserve-dsoc12r4-ktjfc-worker-us-east-2a-dts8r Running m5.xlarge us-east-2 us-east-2a 3d11h preserve-dsoc12r4-ktjfc-worker-us-east-2b-dkv7w Running m5.xlarge us-east-2 us-east-2b 3d11h preserve-dsoc12r4-ktjfc-worker-us-east-2b-k58cw Running m5.xlarge us-east-2 us-east-2b 3d11h Make a copy of one of the existing compute MachineSet definitions and output the result to a JSON file by running the following command. This will be the basis for the GPU-enabled compute machine set definition. USD oc get machineset preserve-dsoc12r4-ktjfc-worker-us-east-2a -n openshift-machine-api -o json > <output_file.json> Edit the JSON file and make the following changes to the new MachineSet definition: Replace worker with gpu . This will be the name of the new machine set. Change the instance type of the new MachineSet definition to g4dn , which includes an NVIDIA Tesla T4 GPU. To learn more about AWS g4dn instance types, see Accelerated Computing . USD jq .spec.template.spec.providerSpec.value.instanceType preserve-dsoc12r4-ktjfc-worker-gpu-us-east-2a.json "g4dn.xlarge" The <output_file.json> file is saved as preserve-dsoc12r4-ktjfc-worker-gpu-us-east-2a.json . Update the following fields in preserve-dsoc12r4-ktjfc-worker-gpu-us-east-2a.json : .metadata.name to a name containing gpu . .spec.selector.matchLabels["machine.openshift.io/cluster-api-machineset"] to match the new .metadata.name . .spec.template.metadata.labels["machine.openshift.io/cluster-api-machineset"] to match the new .metadata.name . .spec.template.spec.providerSpec.value.instanceType to g4dn.xlarge . To verify your changes, perform a diff of the original compute definition and the new GPU-enabled node definition by running the following command: USD oc -n openshift-machine-api get preserve-dsoc12r4-ktjfc-worker-us-east-2a -o json | diff preserve-dsoc12r4-ktjfc-worker-gpu-us-east-2a.json - Example output 10c10 < "name": "preserve-dsoc12r4-ktjfc-worker-gpu-us-east-2a", --- > "name": "preserve-dsoc12r4-ktjfc-worker-us-east-2a", 21c21 < "machine.openshift.io/cluster-api-machineset": "preserve-dsoc12r4-ktjfc-worker-gpu-us-east-2a" --- > "machine.openshift.io/cluster-api-machineset": "preserve-dsoc12r4-ktjfc-worker-us-east-2a" 31c31 < "machine.openshift.io/cluster-api-machineset": "preserve-dsoc12r4-ktjfc-worker-gpu-us-east-2a" --- > "machine.openshift.io/cluster-api-machineset": "preserve-dsoc12r4-ktjfc-worker-us-east-2a" 60c60 < "instanceType": "g4dn.xlarge", --- > "instanceType": "m5.xlarge", Create the GPU-enabled compute machine set from the definition by running the following command: USD oc create -f preserve-dsoc12r4-ktjfc-worker-gpu-us-east-2a.json Example output machineset.machine.openshift.io/preserve-dsoc12r4-ktjfc-worker-gpu-us-east-2a created Verification View the machine set you created by running the following command: USD oc -n openshift-machine-api get machinesets | grep gpu The MachineSet replica count is set to 1 so a new Machine object is created automatically. Example output preserve-dsoc12r4-ktjfc-worker-gpu-us-east-2a 1 1 1 1 4m21s View the Machine object that the machine set created by running the following command: USD oc -n openshift-machine-api get machines | grep gpu Example output preserve-dsoc12r4-ktjfc-worker-gpu-us-east-2a running g4dn.xlarge us-east-2 us-east-2a 4m36s Note that there is no need to specify a namespace for the node. The node definition is cluster scoped. 2.2.8. 
Deploying the Node Feature Discovery Operator After the GPU-enabled node is created, you need to discover the GPU-enabled node so it can be scheduled. To do this, install the Node Feature Discovery (NFD) Operator. The NFD Operator identifies hardware device features in nodes. It solves the general problem of identifying and cataloging hardware resources in the infrastructure nodes so they can be made available to OpenShift Container Platform. Procedure Install the Node Feature Discovery Operator from OperatorHub in the OpenShift Container Platform console. After installing the NFD Operator into OperatorHub , select Node Feature Discovery from the installed Operators list and select Create instance . This installs the nfd-master and nfd-worker pods, one nfd-worker pod for each compute node, in the openshift-nfd namespace. Verify that the Operator is installed and running by running the following command: USD oc get pods -n openshift-nfd Example output NAME READY STATUS RESTARTS AGE nfd-controller-manager-8646fcbb65-x5qgk 2/2 Running 7 (8h ago) 1d Browse to the installed Operator in the console and select Create Node Feature Discovery . Select Create to build an NFD custom resource. This creates NFD pods in the openshift-nfd namespace that poll the OpenShift Container Platform nodes for hardware resources and catalog them. Verification After a successful build, verify that an NFD pod is running on each node by running the following command: USD oc get pods -n openshift-nfd Example output NAME READY STATUS RESTARTS AGE nfd-controller-manager-8646fcbb65-x5qgk 2/2 Running 7 (8h ago) 12d nfd-master-769656c4cb-w9vrv 1/1 Running 0 12d nfd-worker-qjxb2 1/1 Running 3 (3d14h ago) 12d nfd-worker-xtz9b 1/1 Running 5 (3d14h ago) 12d The NFD Operator uses vendor PCI IDs to identify hardware in a node. NVIDIA uses the PCI ID 10de . View the NVIDIA GPU discovered by the NFD Operator by running the following command: USD oc describe node ip-10-0-132-138.us-east-2.compute.internal | egrep 'Roles|pci' Example output Roles: worker feature.node.kubernetes.io/pci-1013.present=true feature.node.kubernetes.io/pci-10de.present=true feature.node.kubernetes.io/pci-1d0f.present=true 10de appears in the node feature list for the GPU-enabled node. This means the NFD Operator correctly identified the node from the GPU-enabled MachineSet. 2.3. Creating a compute machine set on Azure You can create a different compute machine set to serve a specific purpose in your OpenShift Container Platform cluster on Microsoft Azure. For example, you might create infrastructure machine sets and related machines so that you can move supporting workloads to the new machines. Important You can use the advanced machine management and scaling capabilities only in clusters where the Machine API is operational. Clusters with user-provisioned infrastructure require additional validation and configuration to use the Machine API. Clusters with the infrastructure platform type none cannot use the Machine API. This limitation applies even if the compute machines that are attached to the cluster are installed on a platform that supports the feature. This parameter cannot be changed after installation. To view the platform type for your cluster, run the following command: USD oc get infrastructure cluster -o jsonpath='{.status.platform}' 2.3.1.
Sample YAML for a compute machine set custom resource on Azure This sample YAML defines a compute machine set that runs in the 1 Microsoft Azure zone in a region and creates nodes that are labeled with node-role.kubernetes.io/<role>: "" . In this sample, <infrastructure_id> is the infrastructure ID label that is based on the cluster ID that you set when you provisioned the cluster, and <role> is the node label to add. apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 machine.openshift.io/cluster-api-machine-role: <role> 2 machine.openshift.io/cluster-api-machine-type: <role> name: <infrastructure_id>-<role>-<region> 3 namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<region> template: metadata: creationTimestamp: null labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machine-role: <role> machine.openshift.io/cluster-api-machine-type: <role> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<region> spec: metadata: creationTimestamp: null labels: machine.openshift.io/cluster-api-machineset: <machineset_name> node-role.kubernetes.io/<role>: "" providerSpec: value: apiVersion: azureproviderconfig.openshift.io/v1beta1 credentialsSecret: name: azure-cloud-credentials namespace: openshift-machine-api image: 4 offer: "" publisher: "" resourceID: /resourceGroups/<infrastructure_id>-rg/providers/Microsoft.Compute/galleries/gallery_<infrastructure_id>/images/<infrastructure_id>-gen2/versions/latest 5 sku: "" version: "" internalLoadBalancer: "" kind: AzureMachineProviderSpec location: <region> 6 managedIdentity: <infrastructure_id>-identity metadata: creationTimestamp: null natRule: null networkResourceGroup: "" osDisk: diskSizeGB: 128 managedDisk: storageAccountType: Premium_LRS osType: Linux publicIP: false publicLoadBalancer: "" resourceGroup: <infrastructure_id>-rg sshPrivateKey: "" sshPublicKey: "" tags: - name: <custom_tag_name> 7 value: <custom_tag_value> subnet: <infrastructure_id>-<role>-subnet userDataSecret: name: worker-user-data vmSize: Standard_D4s_v3 vnet: <infrastructure_id>-vnet zone: "1" 8 1 Specify the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. If you have the OpenShift CLI installed, you can obtain the infrastructure ID by running the following command: USD oc get -o jsonpath='{.status.infrastructureName}{"\n"}' infrastructure cluster You can obtain the subnet by running the following command: USD oc -n openshift-machine-api \ -o jsonpath='{.spec.template.spec.providerSpec.value.subnet}{"\n"}' \ get machineset/<infrastructure_id>-worker-centralus1 You can obtain the vnet by running the following command: USD oc -n openshift-machine-api \ -o jsonpath='{.spec.template.spec.providerSpec.value.vnet}{"\n"}' \ get machineset/<infrastructure_id>-worker-centralus1 2 Specify the node label to add. 3 Specify the infrastructure ID, node label, and region. 4 Specify the image details for your compute machine set. If you want to use an Azure Marketplace image, see "Selecting an Azure Marketplace image". 5 Specify an image that is compatible with your instance type. The Hyper-V generation V2 images created by the installation program have a -gen2 suffix, while V1 images have the same name without the suffix. 
6 Specify the region to place machines on. 7 Optional: Specify custom tags in your machine set. Provide the tag name in <custom_tag_name> field and the corresponding tag value in <custom_tag_value> field. 8 Specify the zone within your region to place machines on. Ensure that your region supports the zone that you specify. Important If your region supports availability zones, you must specify the zone. Specifying the zone avoids volume node affinity failure when a pod requires a persistent volume attachment. To do this, you can create a compute machine set for each zone in the same region. 2.3.2. Creating a compute machine set In addition to the compute machine sets created by the installation program, you can create your own to dynamically manage the machine compute resources for specific workloads of your choice. Prerequisites Deploy an OpenShift Container Platform cluster. Install the OpenShift CLI ( oc ). Log in to oc as a user with cluster-admin permission. Procedure Create a new YAML file that contains the compute machine set custom resource (CR) sample and is named <file_name>.yaml . Ensure that you set the <clusterID> and <role> parameter values. Optional: If you are not sure which value to set for a specific field, you can check an existing compute machine set from your cluster. To list the compute machine sets in your cluster, run the following command: USD oc get machinesets -n openshift-machine-api Example output NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m To view values of a specific compute machine set custom resource (CR), run the following command: USD oc get machineset <machineset_name> \ -n openshift-machine-api -o yaml Example output apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 name: <infrastructure_id>-<role> 2 namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machine-role: <role> machine.openshift.io/cluster-api-machine-type: <role> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> spec: providerSpec: 3 ... 1 The cluster infrastructure ID. 2 A default node label. Note For clusters that have user-provisioned infrastructure, a compute machine set can only create worker and infra type machines. 3 The values in the <providerSpec> section of the compute machine set CR are platform-specific. For more information about <providerSpec> parameters in the CR, see the sample compute machine set CR configuration for your provider. 
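If you prefer to build the new definition from an existing compute machine set instead of writing the YAML by hand, you can copy one of the machine sets listed in your cluster and rename it before creating it. The following shell sketch shows one possible way to do that with oc and jq; the source machine set name myclustername-worker-centralus1 and the infra role are illustrative assumptions, so substitute names that exist in your cluster and review the remaining labels and providerSpec values before you create the resource.

# Illustrative names only; replace them with values from your cluster.
SOURCE_MACHINESET=myclustername-worker-centralus1
NEW_ROLE=infra

# Look up the infrastructure ID that machine set names are based on.
INFRA_ID=$(oc get -o jsonpath='{.status.infrastructureName}{"\n"}' infrastructure cluster)
NEW_NAME="${INFRA_ID}-${NEW_ROLE}-centralus1"

# Copy the existing machine set, drop server-managed fields, and rename it for the new role.
oc get machineset "${SOURCE_MACHINESET}" -n openshift-machine-api -o json \
  | jq --arg name "${NEW_NAME}" --arg role "${NEW_ROLE}" '
      del(.status, .metadata.uid, .metadata.resourceVersion, .metadata.creationTimestamp, .metadata.generation)
      | .metadata.name = $name
      | .spec.selector.matchLabels["machine.openshift.io/cluster-api-machineset"] = $name
      | .spec.template.metadata.labels["machine.openshift.io/cluster-api-machineset"] = $name
      | .spec.template.metadata.labels["machine.openshift.io/cluster-api-machine-role"] = $role
      | .spec.template.metadata.labels["machine.openshift.io/cluster-api-machine-type"] = $role
    ' > "${NEW_NAME}.json"

Because oc create -f accepts JSON as well as YAML, you can pass the generated file to the command in the next step after you have reviewed it.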
Create a MachineSet CR by running the following command: USD oc create -f <file_name>.yaml Verification View the list of compute machine sets by running the following command: USD oc get machineset -n openshift-machine-api Example output NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-infra-us-east-1a 1 1 1 1 11m agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m When the new compute machine set is available, the DESIRED and CURRENT values match. If the compute machine set is not available, wait a few minutes and run the command again. 2.3.3. Labeling GPU machine sets for the cluster autoscaler You can use a machine set label to indicate which machines the cluster autoscaler can use to deploy GPU-enabled nodes. Prerequisites Your cluster uses a cluster autoscaler. Procedure On the machine set that you want to create machines for the cluster autoscaler to use to deploy GPU-enabled nodes, add a cluster-api/accelerator label: apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: name: machine-set-name spec: template: spec: metadata: labels: cluster-api/accelerator: nvidia-t4 1 1 Specify a label of your choice that consists of alphanumeric characters, - , _ , or . and starts and ends with an alphanumeric character. For example, you might use nvidia-t4 to represent Nvidia T4 GPUs, or nvidia-a10g for A10G GPUs. Note You must specify the value of this label for the spec.resourceLimits.gpus.type parameter in your ClusterAutoscaler CR. For more information, see "Cluster autoscaler resource definition". Additional resources Cluster autoscaler resource definition 2.3.4. Selecting an Azure Marketplace image You can create a machine set running on Azure that deploys machines that use the Azure Marketplace offering. To use this offering, you must first obtain the Azure Marketplace image. When obtaining your image, consider the following: While the images are the same, the Azure Marketplace publisher is different depending on your region. If you are located in North America, specify redhat as the publisher. If you are located in EMEA, specify redhat-limited as the publisher. The offer includes a rh-ocp-worker SKU and a rh-ocp-worker-gen1 SKU. The rh-ocp-worker SKU represents a Hyper-V generation version 2 VM image. The default instance types used in OpenShift Container Platform are version 2 compatible. If you plan to use an instance type that is only version 1 compatible, use the image associated with the rh-ocp-worker-gen1 SKU. The rh-ocp-worker-gen1 SKU represents a Hyper-V version 1 VM image. Important Installing images with the Azure marketplace is not supported on clusters with 64-bit ARM instances. Prerequisites You have installed the Azure CLI client (az) . Your Azure account is entitled for the offer and you have logged into this account with the Azure CLI client. 
Procedure Display all of the available OpenShift Container Platform images by running one of the following commands: North America: USD az vm image list --all --offer rh-ocp-worker --publisher redhat -o table Example output Offer Publisher Sku Urn Version ------------- -------------- ------------------ -------------------------------------------------------------- -------------- rh-ocp-worker RedHat rh-ocp-worker RedHat:rh-ocp-worker:rh-ocpworker:4.8.2021122100 4.8.2021122100 rh-ocp-worker RedHat rh-ocp-worker-gen1 RedHat:rh-ocp-worker:rh-ocp-worker-gen1:4.8.2021122100 4.8.2021122100 EMEA: USD az vm image list --all --offer rh-ocp-worker --publisher redhat-limited -o table Example output Offer Publisher Sku Urn Version ------------- -------------- ------------------ -------------------------------------------------------------- -------------- rh-ocp-worker redhat-limited rh-ocp-worker redhat-limited:rh-ocp-worker:rh-ocp-worker:4.8.2021122100 4.8.2021122100 rh-ocp-worker redhat-limited rh-ocp-worker-gen1 redhat-limited:rh-ocp-worker:rh-ocp-worker-gen1:4.8.2021122100 4.8.2021122100 Note Regardless of the version of OpenShift Container Platform that you install, the correct version of the Azure Marketplace image to use is 4.8. If required, your VMs are automatically upgraded as part of the installation process. Inspect the image for your offer by running one of the following commands: North America: USD az vm image show --urn redhat:rh-ocp-worker:rh-ocp-worker:<version> EMEA: USD az vm image show --urn redhat-limited:rh-ocp-worker:rh-ocp-worker:<version> Review the terms of the offer by running one of the following commands: North America: USD az vm image terms show --urn redhat:rh-ocp-worker:rh-ocp-worker:<version> EMEA: USD az vm image terms show --urn redhat-limited:rh-ocp-worker:rh-ocp-worker:<version> Accept the terms of the offering by running one of the following commands: North America: USD az vm image terms accept --urn redhat:rh-ocp-worker:rh-ocp-worker:<version> EMEA: USD az vm image terms accept --urn redhat-limited:rh-ocp-worker:rh-ocp-worker:<version> Record the image details of your offer, specifically the values for publisher , offer , sku , and version . Add the following parameters to the providerSpec section of your machine set YAML file using the image details for your offer: Sample providerSpec image values for Azure Marketplace machines providerSpec: value: image: offer: rh-ocp-worker publisher: redhat resourceID: "" sku: rh-ocp-worker type: MarketplaceWithPlan version: 4.8.2021122100 2.3.5. Enabling Azure boot diagnostics You can enable boot diagnostics on Azure machines that your machine set creates. Prerequisites Have an existing Microsoft Azure cluster. Procedure Add the diagnostics configuration that is applicable to your storage type to the providerSpec field in your machine set YAML file: For an Azure Managed storage account: providerSpec: diagnostics: boot: storageAccountType: AzureManaged 1 1 Specifies an Azure Managed storage account. For an Azure Unmanaged storage account: providerSpec: diagnostics: boot: storageAccountType: CustomerManaged 1 customerManaged: storageAccountURI: https://<storage-account>.blob.core.windows.net 2 1 Specifies an Azure Unmanaged storage account. 2 Replace <storage-account> with the name of your storage account. Note Only the Azure Blob Storage data service is supported. 
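If you want to add boot diagnostics to a machine set that already exists rather than editing its YAML file directly, one possible approach is a JSON merge patch against the providerSpec field. The following sketch assumes an Azure Managed storage account and a machine set named myclustername-worker-centralus1, which is an illustrative name only. A machine set change applies only to machines that the machine set creates afterward, so to apply the setting everywhere you must replace each existing machine, for example by scaling the replicas down and back up.

# Illustrative machine set name; replace it with one from your cluster.
oc patch machineset myclustername-worker-centralus1 \
  -n openshift-machine-api \
  --type merge \
  -p '{"spec":{"template":{"spec":{"providerSpec":{"value":{"diagnostics":{"boot":{"storageAccountType":"AzureManaged"}}}}}}}}'

New machines created by the patched machine set then report boot diagnostics to the Azure managed storage account.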
Verification On the Microsoft Azure portal, review the Boot diagnostics page for a machine deployed by the machine set, and verify that you can see the serial logs for the machine. 2.3.6. Machine sets that deploy machines as Spot VMs You can save on costs by creating a compute machine set running on Azure that deploys machines as non-guaranteed Spot VMs. Spot VMs utilize unused Azure capacity and are less expensive than standard VMs. You can use Spot VMs for workloads that can tolerate interruptions, such as batch or stateless, horizontally scalable workloads. Azure can terminate a Spot VM at any time. Azure gives a 30-second warning to the user when an interruption occurs. OpenShift Container Platform begins to remove the workloads from the affected instances when Azure issues the termination warning. Interruptions can occur when using Spot VMs for the following reasons: The instance price exceeds your maximum price The supply of Spot VMs decreases Azure needs capacity back When Azure terminates an instance, a termination handler running on the Spot VM node deletes the machine resource. To satisfy the compute machine set replicas quantity, the compute machine set creates a machine that requests a Spot VM. 2.3.6.1. Creating Spot VMs by using compute machine sets You can launch a Spot VM on Azure by adding spotVMOptions to your compute machine set YAML file. Procedure Add the following line under the providerSpec field: providerSpec: value: spotVMOptions: {} You can optionally set the spotVMOptions.maxPrice field to limit the cost of the Spot VM. For example you can set maxPrice: '0.98765' . If the maxPrice is set, this value is used as the hourly maximum spot price. If it is not set, the maximum price defaults to -1 and charges up to the standard VM price. Azure caps Spot VM prices at the standard price. Azure will not evict an instance due to pricing if the instance is set with the default maxPrice . However, an instance can still be evicted due to capacity restrictions. Note It is strongly recommended to use the default standard VM price as the maxPrice value and to not set the maximum price for Spot VMs. 2.3.7. Machine sets that deploy machines on Ephemeral OS disks You can create a compute machine set running on Azure that deploys machines on Ephemeral OS disks. Ephemeral OS disks use local VM capacity rather than remote Azure Storage. This configuration therefore incurs no additional cost and provides lower latency for reading, writing, and reimaging. Additional resources For more information, see the Microsoft Azure documentation about Ephemeral OS disks for Azure VMs . 2.3.7.1. Creating machines on Ephemeral OS disks by using compute machine sets You can launch machines on Ephemeral OS disks on Azure by editing your compute machine set YAML file. Prerequisites Have an existing Microsoft Azure cluster. Procedure Edit the custom resource (CR) by running the following command: USD oc edit machineset <machine-set-name> where <machine-set-name> is the compute machine set that you want to provision machines on Ephemeral OS disks. Add the following to the providerSpec field: providerSpec: value: ... osDisk: ... diskSettings: 1 ephemeralStorageLocation: Local 2 cachingType: ReadOnly 3 managedDisk: storageAccountType: Standard_LRS 4 ... 1 2 3 These lines enable the use of Ephemeral OS disks. 4 Ephemeral OS disks are only supported for VMs or scale set instances that use the Standard LRS storage account type. 
Important The implementation of Ephemeral OS disk support in OpenShift Container Platform only supports the CacheDisk placement type. Do not change the placement configuration setting. Create a compute machine set using the updated configuration: USD oc create -f <machine-set-config>.yaml Verification On the Microsoft Azure portal, review the Overview page for a machine deployed by the compute machine set, and verify that the Ephemeral OS disk field is set to OS cache placement . 2.3.8. Machine sets that deploy machines with ultra disks as data disks You can create a machine set running on Azure that deploys machines with ultra disks. Ultra disks are high-performance storage that are intended for use with the most demanding data workloads. You can also create a persistent volume claim (PVC) that dynamically binds to a storage class backed by Azure ultra disks and mounts them to pods. Note Data disks do not support the ability to specify disk throughput or disk IOPS. You can configure these properties by using PVCs. Additional resources Microsoft Azure ultra disks documentation Machine sets that deploy machines on ultra disks using CSI PVCs Machine sets that deploy machines on ultra disks using in-tree PVCs 2.3.8.1. Creating machines with ultra disks by using machine sets You can deploy machines with ultra disks on Azure by editing your machine set YAML file. Prerequisites Have an existing Microsoft Azure cluster. Procedure Create a custom secret in the openshift-machine-api namespace using the worker data secret by running the following command: USD oc -n openshift-machine-api \ get secret <role>-user-data \ 1 --template='{{index .data.userData | base64decode}}' | jq > userData.txt 2 1 Replace <role> with worker . 2 Specify userData.txt as the name of the new custom secret. In a text editor, open the userData.txt file and locate the final } character in the file. On the immediately preceding line, add a , . Create a new line after the , and add the following configuration details: "storage": { "disks": [ 1 { "device": "/dev/disk/azure/scsi1/lun0", 2 "partitions": [ 3 { "label": "lun0p1", 4 "sizeMiB": 1024, 5 "startMiB": 0 } ] } ], "filesystems": [ 6 { "device": "/dev/disk/by-partlabel/lun0p1", "format": "xfs", "path": "/var/lib/lun0p1" } ] }, "systemd": { "units": [ 7 { "contents": "[Unit]\nBefore=local-fs.target\n[Mount]\nWhere=/var/lib/lun0p1\nWhat=/dev/disk/by-partlabel/lun0p1\nOptions=defaults,pquota\n[Install]\nWantedBy=local-fs.target\n", 8 "enabled": true, "name": "var-lib-lun0p1.mount" } ] } 1 The configuration details for the disk that you want to attach to a node as an ultra disk. 2 Specify the lun value that is defined in the dataDisks stanza of the machine set you are using. For example, if the machine set contains lun: 0 , specify lun0 . You can initialize multiple data disks by specifying multiple "disks" entries in this configuration file. If you specify multiple "disks" entries, ensure that the lun value for each matches the value in the machine set. 3 The configuration details for a new partition on the disk. 4 Specify a label for the partition. You might find it helpful to use hierarchical names, such as lun0p1 for the first partition of lun0 . 5 Specify the total size in MiB of the partition. 6 Specify the filesystem to use when formatting a partition. Use the partition label to specify the partition. 7 Specify a systemd unit to mount the partition at boot. Use the partition label to specify the partition. 
You can create multiple partitions by specifying multiple "partitions" entries in this configuration file. If you specify multiple "partitions" entries, you must specify a systemd unit for each. 8 For Where , specify the value of storage.filesystems.path . For What , specify the value of storage.filesystems.device . Extract the disabling template value to a file called disableTemplating.txt by running the following command: USD oc -n openshift-machine-api get secret <role>-user-data \ 1 --template='{{index .data.disableTemplating | base64decode}}' | jq > disableTemplating.txt 1 Replace <role> with worker . Combine the userData.txt file and disableTemplating.txt file to create a data secret file by running the following command: USD oc -n openshift-machine-api create secret generic <role>-user-data-x5 \ 1 --from-file=userData=userData.txt \ --from-file=disableTemplating=disableTemplating.txt 1 For <role>-user-data-x5 , specify the name of the secret. Replace <role> with worker . Copy an existing Azure MachineSet custom resource (CR) and edit it by running the following command: USD oc edit machineset <machine-set-name> where <machine-set-name> is the machine set that you want to provision machines with ultra disks. Add the following lines in the positions indicated: apiVersion: machine.openshift.io/v1beta1 kind: MachineSet spec: template: spec: metadata: labels: disk: ultrassd 1 providerSpec: value: ultraSSDCapability: Enabled 2 dataDisks: 3 - nameSuffix: ultrassd lun: 0 diskSizeGB: 4 deletionPolicy: Delete cachingType: None managedDisk: storageAccountType: UltraSSD_LRS userDataSecret: name: <role>-user-data-x5 4 1 Specify a label to use to select a node that is created by this machine set. This procedure uses disk.ultrassd for this value. 2 3 These lines enable the use of ultra disks. For dataDisks , include the entire stanza. 4 Specify the user data secret created earlier. Replace <role> with worker . Create a machine set using the updated configuration by running the following command: USD oc create -f <machine-set-name>.yaml Verification Validate that the machines are created by running the following command: USD oc get machines The machines should be in the Running state. For a machine that is running and has a node attached, validate the partition by running the following command: USD oc debug node/<node-name> -- chroot /host lsblk In this command, oc debug node/<node-name> starts a debugging shell on the node <node-name> and passes a command with -- . The passed command chroot /host provides access to the underlying host OS binaries, and lsblk shows the block devices that are attached to the host OS machine. steps To use an ultra disk from within a pod, create a workload that uses the mount point. Create a YAML file similar to the following example: apiVersion: v1 kind: Pod metadata: name: ssd-benchmark1 spec: containers: - name: ssd-benchmark1 image: nginx ports: - containerPort: 80 name: "http-server" volumeMounts: - name: lun0p1 mountPath: "/tmp" volumes: - name: lun0p1 hostPath: path: /var/lib/lun0p1 type: DirectoryOrCreate nodeSelector: disktype: ultrassd 2.3.8.2. Troubleshooting resources for machine sets that enable ultra disks Use the information in this section to understand and recover from issues you might encounter. 2.3.8.2.1. Incorrect ultra disk configuration If an incorrect configuration of the ultraSSDCapability parameter is specified in the machine set, the machine provisioning fails. 
For example, if the ultraSSDCapability parameter is set to Disabled , but an ultra disk is specified in the dataDisks parameter, the following error message appears: StorageAccountType UltraSSD_LRS can be used only when additionalCapabilities.ultraSSDEnabled is set. To resolve this issue, verify that your machine set configuration is correct. 2.3.8.2.2. Unsupported disk parameters If a region, availability zone, or instance size that is not compatible with ultra disks is specified in the machine set, the machine provisioning fails. Check the logs for the following error message: failed to create vm <machine_name>: failure sending request for machine <machine_name>: cannot create vm: compute.VirtualMachinesClient#CreateOrUpdate: Failure sending request: StatusCode=400 -- Original Error: Code="BadRequest" Message="Storage Account type 'UltraSSD_LRS' is not supported <more_information_about_why>." To resolve this issue, verify that you are using this feature in a supported environment and that your machine set configuration is correct. 2.3.8.2.3. Unable to delete disks If the deletion of ultra disks as data disks is not working as expected, the machines are deleted and the data disks are orphaned. You must delete the orphaned disks manually if desired. 2.3.9. Enabling customer-managed encryption keys for a machine set You can supply an encryption key to Azure to encrypt data on managed disks at rest. You can enable server-side encryption with customer-managed keys by using the Machine API. An Azure Key Vault, a disk encryption set, and an encryption key are required to use a customer-managed key. The disk encryption set must be in a resource group where the Cloud Credential Operator (CCO) has granted permissions. If not, an additional reader role is required to be granted on the disk encryption set. Prerequisites Create an Azure Key Vault instance . Create an instance of a disk encryption set . Grant the disk encryption set access to key vault . Procedure Configure the disk encryption set under the providerSpec field in your machine set YAML file. For example: providerSpec: value: osDisk: diskSizeGB: 128 managedDisk: diskEncryptionSet: id: /subscriptions/<subscription_id>/resourceGroups/<resource_group_name>/providers/Microsoft.Compute/diskEncryptionSets/<disk_encryption_set_name> storageAccountType: Premium_LRS Additional resources Azure documentation about customer-managed keys 2.3.10. Accelerated Networking for Microsoft Azure VMs Accelerated Networking uses single root I/O virtualization (SR-IOV) to provide Microsoft Azure VMs with a more direct path to the switch. This enhances network performance. This feature can be enabled during or after installation. 2.3.10.1. Limitations Consider the following limitations when deciding whether to use Accelerated Networking: Accelerated Networking is only supported on clusters where the Machine API is operational. Although the minimum requirement for an Azure worker node is two vCPUs, Accelerated Networking requires an Azure VM size that includes at least four vCPUs. To satisfy this requirement, you can change the value of vmSize in your machine set. For information about Azure VM sizes, see Microsoft Azure documentation . When this feature is enabled on an existing Azure cluster, only newly provisioned nodes are affected. Currently running nodes are not reconciled. To enable the feature on all nodes, you must replace each existing machine. 
This can be done for each machine individually, or by scaling the replicas down to zero, and then scaling back up to your desired number of replicas. 2.3.11. Adding a GPU node to an existing OpenShift Container Platform cluster You can copy and modify a default compute machine set configuration to create a GPU-enabled machine set and machines for the Azure cloud provider. The following table lists the validated instance types: vmSize NVIDIA GPU accelerator Maximum number of GPUs Architecture Standard_NC24s_v3 V100 4 x86 Standard_NC4as_T4_v3 T4 1 x86 ND A100 v4 A100 8 x86 Note By default, Azure subscriptions do not have a quota for the Azure instance types with GPU. Customers have to request a quota increase for the Azure instance families listed above. Procedure View the machines and machine sets that exist in the openshift-machine-api namespace by running the following command. Each compute machine set is associated with a different availability zone within the Azure region. The installer automatically load balances compute machines across availability zones. USD oc get machineset -n openshift-machine-api Example output NAME DESIRED CURRENT READY AVAILABLE AGE myclustername-worker-centralus1 1 1 1 1 6h9m myclustername-worker-centralus2 1 1 1 1 6h9m myclustername-worker-centralus3 1 1 1 1 6h9m Make a copy of one of the existing compute MachineSet definitions and output the result to a YAML file by running the following command. This will be the basis for the GPU-enabled compute machine set definition. USD oc get machineset -n openshift-machine-api myclustername-worker-centralus1 -o yaml > machineset-azure.yaml View the content of the machineset: USD cat machineset-azure.yaml Example machineset-azure.yaml file apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: annotations: machine.openshift.io/GPU: "0" machine.openshift.io/memoryMb: "16384" machine.openshift.io/vCPU: "4" creationTimestamp: "2023-02-06T14:08:19Z" generation: 1 labels: machine.openshift.io/cluster-api-cluster: myclustername machine.openshift.io/cluster-api-machine-role: worker machine.openshift.io/cluster-api-machine-type: worker name: myclustername-worker-centralus1 namespace: openshift-machine-api resourceVersion: "23601" uid: acd56e0c-7612-473a-ae37-8704f34b80de spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: myclustername machine.openshift.io/cluster-api-machineset: myclustername-worker-centralus1 template: metadata: labels: machine.openshift.io/cluster-api-cluster: myclustername machine.openshift.io/cluster-api-machine-role: worker machine.openshift.io/cluster-api-machine-type: worker machine.openshift.io/cluster-api-machineset: myclustername-worker-centralus1 spec: lifecycleHooks: {} metadata: {} providerSpec: value: acceleratedNetworking: true apiVersion: machine.openshift.io/v1beta1 credentialsSecret: name: azure-cloud-credentials namespace: openshift-machine-api diagnostics: {} image: offer: "" publisher: "" resourceID: /resourceGroups/myclustername-rg/providers/Microsoft.Compute/galleries/gallery_myclustername_n6n4r/images/myclustername-gen2/versions/latest sku: "" version: "" kind: AzureMachineProviderSpec location: centralus managedIdentity: myclustername-identity metadata: creationTimestamp: null networkResourceGroup: myclustername-rg osDisk: diskSettings: {} diskSizeGB: 128 managedDisk: storageAccountType: Premium_LRS osType: Linux publicIP: false publicLoadBalancer: myclustername resourceGroup: myclustername-rg spotVMOptions: {} subnet: 
myclustername-worker-subnet userDataSecret: name: worker-user-data vmSize: Standard_D4s_v3 vnet: myclustername-vnet zone: "1" status: availableReplicas: 1 fullyLabeledReplicas: 1 observedGeneration: 1 readyReplicas: 1 replicas: 1 Make a copy of the machineset-azure.yaml file by running the following command: USD cp machineset-azure.yaml machineset-azure-gpu.yaml Update the following fields in machineset-azure-gpu.yaml : Change .metadata.name to a name containing gpu . Change .spec.selector.matchLabels["machine.openshift.io/cluster-api-machineset"] to match the new .metadata.name. Change .spec.template.metadata.labels["machine.openshift.io/cluster-api-machineset"] to match the new .metadata.name . Change .spec.template.spec.providerSpec.value.vmSize to Standard_NC4as_T4_v3 . Example machineset-azure-gpu.yaml file apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: annotations: machine.openshift.io/GPU: "1" machine.openshift.io/memoryMb: "28672" machine.openshift.io/vCPU: "4" creationTimestamp: "2023-02-06T20:27:12Z" generation: 1 labels: machine.openshift.io/cluster-api-cluster: myclustername machine.openshift.io/cluster-api-machine-role: worker machine.openshift.io/cluster-api-machine-type: worker name: myclustername-nc4ast4-gpu-worker-centralus1 namespace: openshift-machine-api resourceVersion: "166285" uid: 4eedce7f-6a57-4abe-b529-031140f02ffa spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: myclustername machine.openshift.io/cluster-api-machineset: myclustername-nc4ast4-gpu-worker-centralus1 template: metadata: labels: machine.openshift.io/cluster-api-cluster: myclustername machine.openshift.io/cluster-api-machine-role: worker machine.openshift.io/cluster-api-machine-type: worker machine.openshift.io/cluster-api-machineset: myclustername-nc4ast4-gpu-worker-centralus1 spec: lifecycleHooks: {} metadata: {} providerSpec: value: acceleratedNetworking: true apiVersion: machine.openshift.io/v1beta1 credentialsSecret: name: azure-cloud-credentials namespace: openshift-machine-api diagnostics: {} image: offer: "" publisher: "" resourceID: /resourceGroups/myclustername-rg/providers/Microsoft.Compute/galleries/gallery_myclustername_n6n4r/images/myclustername-gen2/versions/latest sku: "" version: "" kind: AzureMachineProviderSpec location: centralus managedIdentity: myclustername-identity metadata: creationTimestamp: null networkResourceGroup: myclustername-rg osDisk: diskSettings: {} diskSizeGB: 128 managedDisk: storageAccountType: Premium_LRS osType: Linux publicIP: false publicLoadBalancer: myclustername resourceGroup: myclustername-rg spotVMOptions: {} subnet: myclustername-worker-subnet userDataSecret: name: worker-user-data vmSize: Standard_NC4as_T4_v3 vnet: myclustername-vnet zone: "1" status: availableReplicas: 1 fullyLabeledReplicas: 1 observedGeneration: 1 readyReplicas: 1 replicas: 1 To verify your changes, perform a diff of the original compute definition and the new GPU-enabled node definition by running the following command: USD diff machineset-azure.yaml machineset-azure-gpu.yaml Example output 14c14 < name: myclustername-worker-centralus1 --- > name: myclustername-nc4ast4-gpu-worker-centralus1 23c23 < machine.openshift.io/cluster-api-machineset: myclustername-worker-centralus1 --- > machine.openshift.io/cluster-api-machineset: myclustername-nc4ast4-gpu-worker-centralus1 30c30 < machine.openshift.io/cluster-api-machineset: myclustername-worker-centralus1 --- > machine.openshift.io/cluster-api-machineset: 
myclustername-nc4ast4-gpu-worker-centralus1 67c67 < vmSize: Standard_D4s_v3 --- > vmSize: Standard_NC4as_T4_v3 Create the GPU-enabled compute machine set from the definition file by running the following command: USD oc create -f machineset-azure-gpu.yaml Example output machineset.machine.openshift.io/myclustername-nc4ast4-gpu-worker-centralus1 created View the machines and machine sets that exist in the openshift-machine-api namespace by running the following command. Each compute machine set is associated with a different availability zone within the Azure region. The installer automatically load balances compute machines across availability zones. USD oc get machineset -n openshift-machine-api Example output NAME DESIRED CURRENT READY AVAILABLE AGE clustername-n6n4r-nc4ast4-gpu-worker-centralus1 1 1 1 1 122m clustername-n6n4r-worker-centralus1 1 1 1 1 8h clustername-n6n4r-worker-centralus2 1 1 1 1 8h clustername-n6n4r-worker-centralus3 1 1 1 1 8h View the machines that exist in the openshift-machine-api namespace by running the following command. You can only configure one compute machine per set, although you can scale a compute machine set to add a node in a particular region and zone. USD oc get machines -n openshift-machine-api Example output NAME PHASE TYPE REGION ZONE AGE myclustername-master-0 Running Standard_D8s_v3 centralus 2 6h40m myclustername-master-1 Running Standard_D8s_v3 centralus 1 6h40m myclustername-master-2 Running Standard_D8s_v3 centralus 3 6h40m myclustername-nc4ast4-gpu-worker-centralus1-w9bqn Running centralus 1 21m myclustername-worker-centralus1-rbh6b Running Standard_D4s_v3 centralus 1 6h38m myclustername-worker-centralus2-dbz7w Running Standard_D4s_v3 centralus 2 6h38m myclustername-worker-centralus3-p9b8c Running Standard_D4s_v3 centralus 3 6h38m View the existing nodes, machines, and machine sets by running the following command. Note that each node is an instance of a machine definition with a specific Azure region and OpenShift Container Platform role. USD oc get nodes Example output NAME STATUS ROLES AGE VERSION myclustername-master-0 Ready control-plane,master 6h39m v1.26.0 myclustername-master-1 Ready control-plane,master 6h41m v1.26.0 myclustername-master-2 Ready control-plane,master 6h39m v1.26.0 myclustername-nc4ast4-gpu-worker-centralus1-w9bqn Ready worker 14m v1.26.0 myclustername-worker-centralus1-rbh6b Ready worker 6h29m v1.26.0 myclustername-worker-centralus2-dbz7w Ready worker 6h29m v1.26.0 myclustername-worker-centralus3-p9b8c Ready worker 6h31m v1.26.0 View the list of compute machine sets: USD oc get machineset -n openshift-machine-api Example output NAME DESIRED CURRENT READY AVAILABLE AGE myclustername-worker-centralus1 1 1 1 1 8h myclustername-worker-centralus2 1 1 1 1 8h myclustername-worker-centralus3 1 1 1 1 8h Create the GPU-enabled compute machine set from the definition file by running the following command: USD oc create -f machineset-azure-gpu.yaml View the list of compute machine sets: oc get machineset -n openshift-machine-api Example output NAME DESIRED CURRENT READY AVAILABLE AGE myclustername-nc4ast4-gpu-worker-centralus1 1 1 1 1 121m myclustername-worker-centralus1 1 1 1 1 8h myclustername-worker-centralus2 1 1 1 1 8h myclustername-worker-centralus3 1 1 1 1 8h Verification View the machine set you created by running the following command: USD oc get machineset -n openshift-machine-api | grep gpu The MachineSet replica count is set to 1 so a new Machine object is created automatically. 
Example output myclustername-nc4ast4-gpu-worker-centralus1 1 1 1 1 121m View the Machine object that the machine set created by running the following command: USD oc -n openshift-machine-api get machines | grep gpu Example output myclustername-nc4ast4-gpu-worker-centralus1-w9bqn Running Standard_NC4as_T4_v3 centralus 1 21m Note There is no need to specify a namespace for the node. The node definition is cluster scoped. 2.3.12. Deploying the Node Feature Discovery Operator After the GPU-enabled node is created, you need to discover the GPU-enabled node so it can be scheduled. To do this, install the Node Feature Discovery (NFD) Operator. The NFD Operator identifies hardware device features in nodes. It solves the general problem of identifying and cataloging hardware resources in the infrastructure nodes so they can be made available to OpenShift Container Platform. Procedure Install the Node Feature Discovery Operator from OperatorHub in the OpenShift Container Platform console. After installing the NFD Operator into OperatorHub , select Node Feature Discovery from the installed Operators list and select Create instance . This installs the nfd-master and nfd-worker pods, one nfd-worker pod for each compute node, in the openshift-nfd namespace. Verify that the Operator is installed and running by running the following command: USD oc get pods -n openshift-nfd Example output NAME READY STATUS RESTARTS AGE nfd-controller-manager-8646fcbb65-x5qgk 2/2 Running 7 (8h ago) 1d Browse to the installed Operator in the console and select Create Node Feature Discovery . Select Create to build an NFD custom resource. This creates NFD pods in the openshift-nfd namespace that poll the OpenShift Container Platform nodes for hardware resources and catalog them. Verification After a successful build, verify that an NFD pod is running on each node by running the following command: USD oc get pods -n openshift-nfd Example output NAME READY STATUS RESTARTS AGE nfd-controller-manager-8646fcbb65-x5qgk 2/2 Running 7 (8h ago) 12d nfd-master-769656c4cb-w9vrv 1/1 Running 0 12d nfd-worker-qjxb2 1/1 Running 3 (3d14h ago) 12d nfd-worker-xtz9b 1/1 Running 5 (3d14h ago) 12d The NFD Operator uses vendor PCI IDs to identify hardware in a node. NVIDIA uses the PCI ID 10de . View the NVIDIA GPU discovered by the NFD Operator by running the following command: USD oc describe node ip-10-0-132-138.us-east-2.compute.internal | egrep 'Roles|pci' Example output Roles: worker feature.node.kubernetes.io/pci-1013.present=true feature.node.kubernetes.io/pci-10de.present=true feature.node.kubernetes.io/pci-1d0f.present=true 10de appears in the node feature list for the GPU-enabled node. This means the NFD Operator correctly identified the node from the GPU-enabled MachineSet. Additional resources Enabling Accelerated Networking during installation 2.3.12.1. Enabling Accelerated Networking on an existing Microsoft Azure cluster You can enable Accelerated Networking on Azure by adding acceleratedNetworking to your machine set YAML file. Prerequisites Have an existing Microsoft Azure cluster where the Machine API is operational. Procedure Add the following to the providerSpec field: providerSpec: value: acceleratedNetworking: true 1 vmSize: <azure-vm-size> 2 1 This line enables Accelerated Networking. 2 Specify an Azure VM size that includes at least four vCPUs. For information about VM sizes, see Microsoft Azure documentation . Next steps To enable the feature on currently running nodes, you must replace each existing machine.
This can be done for each machine individually, or by scaling the replicas down to zero, and then scaling back up to your desired number of replicas. Verification On the Microsoft Azure portal, review the Networking settings page for a machine provisioned by the machine set, and verify that the Accelerated networking field is set to Enabled . Additional resources Manually scaling a compute machine set 2.4. Creating a compute machine set on Azure Stack Hub You can create a different compute machine set to serve a specific purpose in your OpenShift Container Platform cluster on Microsoft Azure Stack Hub. For example, you might create infrastructure machine sets and related machines so that you can move supporting workloads to the new machines. Important You can use the advanced machine management and scaling capabilities only in clusters where the Machine API is operational. Clusters with user-provisioned infrastructure require additional validation and configuration to use the Machine API. Clusters with the infrastructure platform type none cannot use the Machine API. This limitation applies even if the compute machines that are attached to the cluster are installed on a platform that supports the feature. This parameter cannot be changed after installation. To view the platform type for your cluster, run the following command: USD oc get infrastructure cluster -o jsonpath='{.status.platform}' 2.4.1. Sample YAML for a compute machine set custom resource on Azure Stack Hub This sample YAML defines a compute machine set that runs in the 1 Microsoft Azure zone in a region and creates nodes that are labeled with node-role.kubernetes.io/<role>: "" . In this sample, <infrastructure_id> is the infrastructure ID label that is based on the cluster ID that you set when you provisioned the cluster, and <role> is the node label to add. 
apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 machine.openshift.io/cluster-api-machine-role: <role> 2 machine.openshift.io/cluster-api-machine-type: <role> 3 name: <infrastructure_id>-<role>-<region> 4 namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 5 machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<region> 6 template: metadata: creationTimestamp: null labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 7 machine.openshift.io/cluster-api-machine-role: <role> 8 machine.openshift.io/cluster-api-machine-type: <role> 9 machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<region> 10 spec: metadata: creationTimestamp: null labels: node-role.kubernetes.io/<role>: "" 11 providerSpec: value: apiVersion: machine.openshift.io/v1beta1 availabilitySet: <availability_set> 12 credentialsSecret: name: azure-cloud-credentials namespace: openshift-machine-api image: offer: "" publisher: "" resourceID: /resourceGroups/<infrastructure_id>-rg/providers/Microsoft.Compute/images/<infrastructure_id> 13 sku: "" version: "" internalLoadBalancer: "" kind: AzureMachineProviderSpec location: <region> 14 managedIdentity: <infrastructure_id>-identity 15 metadata: creationTimestamp: null natRule: null networkResourceGroup: "" osDisk: diskSizeGB: 128 managedDisk: storageAccountType: Premium_LRS osType: Linux publicIP: false publicLoadBalancer: "" resourceGroup: <infrastructure_id>-rg 16 sshPrivateKey: "" sshPublicKey: "" subnet: <infrastructure_id>-<role>-subnet 17 18 userDataSecret: name: worker-user-data 19 vmSize: Standard_DS4_v2 vnet: <infrastructure_id>-vnet 20 zone: "1" 21 1 5 7 13 15 16 17 20 Specify the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. If you have the OpenShift CLI installed, you can obtain the infrastructure ID by running the following command: USD oc get -o jsonpath='{.status.infrastructureName}{"\n"}' infrastructure cluster You can obtain the subnet by running the following command: USD oc -n openshift-machine-api \ -o jsonpath='{.spec.template.spec.providerSpec.value.subnet}{"\n"}' \ get machineset/<infrastructure_id>-worker-centralus1 You can obtain the vnet by running the following command: USD oc -n openshift-machine-api \ -o jsonpath='{.spec.template.spec.providerSpec.value.vnet}{"\n"}' \ get machineset/<infrastructure_id>-worker-centralus1 2 3 8 9 11 18 19 Specify the node label to add. 4 6 10 Specify the infrastructure ID, node label, and region. 14 Specify the region to place machines on. 21 Specify the zone within your region to place machines on. Be sure that your region supports the zone that you specify. 12 Specify the availability set for the cluster. 2.4.2. Creating a compute machine set In addition to the compute machine sets created by the installation program, you can create your own to dynamically manage the machine compute resources for specific workloads of your choice. Prerequisites Deploy an OpenShift Container Platform cluster. Install the OpenShift CLI ( oc ). Log in to oc as a user with cluster-admin permission. Create an availability set in which to deploy Azure Stack Hub compute machines. Procedure Create a new YAML file that contains the compute machine set custom resource (CR) sample and is named <file_name>.yaml . 
Ensure that you set the <availabilitySet> , <clusterID> , and <role> parameter values. Optional: If you are not sure which value to set for a specific field, you can check an existing compute machine set from your cluster. To list the compute machine sets in your cluster, run the following command: USD oc get machinesets -n openshift-machine-api Example output NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m To view values of a specific compute machine set custom resource (CR), run the following command: USD oc get machineset <machineset_name> \ -n openshift-machine-api -o yaml Example output apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 name: <infrastructure_id>-<role> 2 namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machine-role: <role> machine.openshift.io/cluster-api-machine-type: <role> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> spec: providerSpec: 3 ... 1 The cluster infrastructure ID. 2 A default node label. Note For clusters that have user-provisioned infrastructure, a compute machine set can only create worker and infra type machines. 3 The values in the <providerSpec> section of the compute machine set CR are platform-specific. For more information about <providerSpec> parameters in the CR, see the sample compute machine set CR configuration for your provider. Create a MachineSet CR by running the following command: USD oc create -f <file_name>.yaml Verification View the list of compute machine sets by running the following command: USD oc get machineset -n openshift-machine-api Example output NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-infra-us-east-1a 1 1 1 1 11m agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m When the new compute machine set is available, the DESIRED and CURRENT values match. If the compute machine set is not available, wait a few minutes and run the command again. 2.4.3. Labeling GPU machine sets for the cluster autoscaler You can use a machine set label to indicate which machines the cluster autoscaler can use to deploy GPU-enabled nodes. Prerequisites Your cluster uses a cluster autoscaler. Procedure On the machine set that you want to create machines for the cluster autoscaler to use to deploy GPU-enabled nodes, add a cluster-api/accelerator label: apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: name: machine-set-name spec: template: spec: metadata: labels: cluster-api/accelerator: nvidia-t4 1 1 Specify a label of your choice that consists of alphanumeric characters, - , _ , or . and starts and ends with an alphanumeric character. For example, you might use nvidia-t4 to represent Nvidia T4 GPUs, or nvidia-a10g for A10G GPUs. 
Note You must specify the value of this label for the spec.resourceLimits.gpus.type parameter in your ClusterAutoscaler CR. For more information, see "Cluster autoscaler resource definition". Additional resources Cluster autoscaler resource definition 2.4.4. Enabling Azure boot diagnostics You can enable boot diagnostics on Azure machines that your machine set creates. Prerequisites Have an existing Microsoft Azure Stack Hub cluster. Procedure Add the diagnostics configuration that is applicable to your storage type to the providerSpec field in your machine set YAML file: For an Azure Managed storage account: providerSpec: diagnostics: boot: storageAccountType: AzureManaged 1 1 Specifies an Azure Managed storage account. For an Azure Unmanaged storage account: providerSpec: diagnostics: boot: storageAccountType: CustomerManaged 1 customerManaged: storageAccountURI: https://<storage-account>.blob.core.windows.net 2 1 Specifies an Azure Unmanaged storage account. 2 Replace <storage-account> with the name of your storage account. Note Only the Azure Blob Storage data service is supported. Verification On the Microsoft Azure portal, review the Boot diagnostics page for a machine deployed by the machine set, and verify that you can see the serial logs for the machine. 2.4.5. Enabling customer-managed encryption keys for a machine set You can supply an encryption key to Azure to encrypt data on managed disks at rest. You can enable server-side encryption with customer-managed keys by using the Machine API. An Azure Key Vault, a disk encryption set, and an encryption key are required to use a customer-managed key. The disk encryption set must be in a resource group where the Cloud Credential Operator (CCO) has granted permissions. If not, an additional reader role is required to be granted on the disk encryption set. Prerequisites Create an Azure Key Vault instance . Create an instance of a disk encryption set . Grant the disk encryption set access to key vault . Procedure Configure the disk encryption set under the providerSpec field in your machine set YAML file. For example: providerSpec: value: osDisk: diskSizeGB: 128 managedDisk: diskEncryptionSet: id: /subscriptions/<subscription_id>/resourceGroups/<resource_group_name>/providers/Microsoft.Compute/diskEncryptionSets/<disk_encryption_set_name> storageAccountType: Premium_LRS Additional resources Azure documentation about customer-managed keys 2.5. Creating a compute machine set on GCP You can create a different compute machine set to serve a specific purpose in your OpenShift Container Platform cluster on Google Cloud Platform (GCP). For example, you might create infrastructure machine sets and related machines so that you can move supporting workloads to the new machines. Important You can use the advanced machine management and scaling capabilities only in clusters where the Machine API is operational. Clusters with user-provisioned infrastructure require additional validation and configuration to use the Machine API. Clusters with the infrastructure platform type none cannot use the Machine API. This limitation applies even if the compute machines that are attached to the cluster are installed on a platform that supports the feature. This parameter cannot be changed after installation. To view the platform type for your cluster, run the following command: USD oc get infrastructure cluster -o jsonpath='{.status.platform}' 2.5.1. 
Sample YAML for a compute machine set custom resource on GCP This sample YAML defines a compute machine set that runs in Google Cloud Platform (GCP) and creates nodes that are labeled with node-role.kubernetes.io/<role>: "" , where <role> is the node label to add. Values obtained by using the OpenShift CLI In the following example, you can obtain some of the values for your cluster by using the OpenShift CLI. Infrastructure ID The <infrastructure_id> string is the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. If you have the OpenShift CLI installed, you can obtain the infrastructure ID by running the following command: USD oc get -o jsonpath='{.status.infrastructureName}{"\n"}' infrastructure cluster Image path The <path_to_image> string is the path to the image that was used to create the disk. If you have the OpenShift CLI installed, you can obtain the path to the image by running the following command: USD oc -n openshift-machine-api \ -o jsonpath='{.spec.template.spec.providerSpec.value.disks[0].image}{"\n"}' \ get machineset/<infrastructure_id>-worker-a Sample GCP MachineSet values apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 name: <infrastructure_id>-w-a namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-w-a template: metadata: creationTimestamp: null labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machine-role: <role> 2 machine.openshift.io/cluster-api-machine-type: <role> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-w-a spec: metadata: labels: node-role.kubernetes.io/<role>: "" providerSpec: value: apiVersion: gcpprovider.openshift.io/v1beta1 canIPForward: false credentialsSecret: name: gcp-cloud-credentials deletionProtection: false disks: - autoDelete: true boot: true image: <path_to_image> 3 labels: null sizeGb: 128 type: pd-ssd gcpMetadata: 4 - key: <custom_metadata_key> value: <custom_metadata_value> kind: GCPMachineProviderSpec machineType: n1-standard-4 metadata: creationTimestamp: null networkInterfaces: - network: <infrastructure_id>-network subnetwork: <infrastructure_id>-worker-subnet projectID: <project_name> 5 region: us-central1 serviceAccounts: 6 - email: <infrastructure_id>-w@<project_name>.iam.gserviceaccount.com scopes: - https://www.googleapis.com/auth/cloud-platform tags: - <infrastructure_id>-worker userDataSecret: name: worker-user-data zone: us-central1-a 1 For <infrastructure_id> , specify the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. 2 For <node> , specify the node label to add. 3 Specify the path to the image that is used in current compute machine sets. 
To use a GCP Marketplace image, specify the offer to use: OpenShift Container Platform: https://www.googleapis.com/compute/v1/projects/redhat-marketplace-public/global/images/redhat-coreos-ocp-413-x86-64-202305021736 OpenShift Platform Plus: https://www.googleapis.com/compute/v1/projects/redhat-marketplace-public/global/images/redhat-coreos-opp-413-x86-64-202305021736 OpenShift Kubernetes Engine: https://www.googleapis.com/compute/v1/projects/redhat-marketplace-public/global/images/redhat-coreos-oke-413-x86-64-202305021736 4 Optional: Specify custom metadata in the form of a key:value pair. For example use cases, see the GCP documentation for setting custom metadata . 5 For <project_name> , specify the name of the GCP project that you use for your cluster. 6 Specifies a single service account. Multiple service accounts are not supported. 2.5.2. Creating a compute machine set In addition to the compute machine sets created by the installation program, you can create your own to dynamically manage the machine compute resources for specific workloads of your choice. Prerequisites Deploy an OpenShift Container Platform cluster. Install the OpenShift CLI ( oc ). Log in to oc as a user with cluster-admin permission. Procedure Create a new YAML file that contains the compute machine set custom resource (CR) sample and is named <file_name>.yaml . Ensure that you set the <clusterID> and <role> parameter values. Optional: If you are not sure which value to set for a specific field, you can check an existing compute machine set from your cluster. To list the compute machine sets in your cluster, run the following command: USD oc get machinesets -n openshift-machine-api Example output NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m To view values of a specific compute machine set custom resource (CR), run the following command: USD oc get machineset <machineset_name> \ -n openshift-machine-api -o yaml Example output apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 name: <infrastructure_id>-<role> 2 namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machine-role: <role> machine.openshift.io/cluster-api-machine-type: <role> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> spec: providerSpec: 3 ... 1 The cluster infrastructure ID. 2 A default node label. Note For clusters that have user-provisioned infrastructure, a compute machine set can only create worker and infra type machines. 3 The values in the <providerSpec> section of the compute machine set CR are platform-specific. For more information about <providerSpec> parameters in the CR, see the sample compute machine set CR configuration for your provider. 
Create a MachineSet CR by running the following command: USD oc create -f <file_name>.yaml Verification View the list of compute machine sets by running the following command: USD oc get machineset -n openshift-machine-api Example output NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-infra-us-east-1a 1 1 1 1 11m agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m When the new compute machine set is available, the DESIRED and CURRENT values match. If the compute machine set is not available, wait a few minutes and run the command again. 2.5.3. Labeling GPU machine sets for the cluster autoscaler You can use a machine set label to indicate which machines the cluster autoscaler can use to deploy GPU-enabled nodes. Prerequisites Your cluster uses a cluster autoscaler. Procedure On the machine set that you want to create machines for the cluster autoscaler to use to deploy GPU-enabled nodes, add a cluster-api/accelerator label: apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: name: machine-set-name spec: template: spec: metadata: labels: cluster-api/accelerator: nvidia-t4 1 1 Specify a label of your choice that consists of alphanumeric characters, - , _ , or . and starts and ends with an alphanumeric character. For example, you might use nvidia-t4 to represent Nvidia T4 GPUs, or nvidia-a10g for A10G GPUs. Note You must specify the value of this label for the spec.resourceLimits.gpus.type parameter in your ClusterAutoscaler CR. For more information, see "Cluster autoscaler resource definition". Additional resources Cluster autoscaler resource definition 2.5.4. Configuring persistent disk types by using machine sets You can configure the type of persistent disk that a machine set deploys machines on by editing the machine set YAML file. For more information about persistent disk types, compatibility, regional availability, and limitations, see the GCP Compute Engine documentation about persistent disks . Procedure In a text editor, open the YAML file for an existing machine set or create a new one. Edit the following line under the providerSpec field: apiVersion: machine.openshift.io/v1beta1 kind: MachineSet ... spec: template: spec: providerSpec: value: disks: type: <pd-disk-type> 1 1 Specify the persistent disk type. Valid values are pd-ssd , pd-standard , and pd-balanced . The default value is pd-standard . Verification Using the Google Cloud console, review the details for a machine deployed by the machine set and verify that the Type field matches the configured disk type. 2.5.5. Configuring Confidential VM by using machine sets By editing the machine set YAML file, you can configure the Confidential VM options that a machine set uses for machines that it deploys. For more information about Confidential Compute features, functionality, and compatibility, see the GCP Compute Engine documentation about Confidential VM . Important Confidential Computing is a Technology Preview feature only. Technology Preview features are not supported with Red Hat production service level agreements (SLAs) and might not be functionally complete. Red Hat does not recommend using them in production. These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process. 
For more information about the support scope of Red Hat Technology Preview features, see Technology Preview Features Support Scope . Procedure In a text editor, open the YAML file for an existing machine set or create a new one. Edit the following section under the providerSpec field: apiVersion: machine.openshift.io/v1beta1 kind: MachineSet ... spec: template: spec: providerSpec: value: confidentialCompute: Enabled 1 onHostMaintenance: Terminate 2 machineType: n2d-standard-8 3 ... 1 Specify whether Confidential VM is enabled. Valid values are Disabled or Enabled . 2 Specify the behavior of the VM during a host maintenance event, such as a hardware or software update. For a machine that uses Confidential VM, this value must be set to Terminate , which stops the VM. Confidential VM does not support live VM migration. 3 Specify a machine type that supports Confidential VM. Confidential VM supports the N2D and C2D series of machine types. Verification On the Google Cloud console, review the details for a machine deployed by the machine set and verify that the Confidential VM options match the values that you configured. 2.5.6. Machine sets that deploy machines as preemptible VM instances You can save on costs by creating a compute machine set running on GCP that deploys machines as non-guaranteed preemptible VM instances. Preemptible VM instances utilize excess Compute Engine capacity and are less expensive than normal instances. You can use preemptible VM instances for workloads that can tolerate interruptions, such as batch or stateless, horizontally scalable workloads. GCP Compute Engine can terminate a preemptible VM instance at any time. Compute Engine sends a preemption notice to the user indicating that an interruption will occur in 30 seconds. OpenShift Container Platform begins to remove the workloads from the affected instances when Compute Engine issues the preemption notice. An ACPI G3 Mechanical Off signal is sent to the operating system after 30 seconds if the instance is not stopped. The preemptible VM instance is then transitioned to a TERMINATED state by Compute Engine. Interruptions can occur when using preemptible VM instances for the following reasons: There is a system or maintenance event The supply of preemptible VM instances decreases The instance reaches the end of the allotted 24-hour period for preemptible VM instances When GCP terminates an instance, a termination handler running on the preemptible VM instance node deletes the machine resource. To satisfy the compute machine set replicas quantity, the compute machine set creates a machine that requests a preemptible VM instance. 2.5.6.1. Creating preemptible VM instances by using compute machine sets You can launch a preemptible VM instance on GCP by adding preemptible to your compute machine set YAML file. Procedure Add the following line under the providerSpec field: providerSpec: value: preemptible: true If preemptible is set to true , the machine is labelled as an interruptable-instance after the instance is launched. 2.5.7. Configuring Shielded VM options by using machine sets By editing the machine set YAML file, you can configure the Shielded VM options that a machine set uses for machines that it deploys. For more information about Shielded VM features and functionality, see the GCP Compute Engine documentation about Shielded VM . Procedure In a text editor, open the YAML file for an existing machine set or create a new one. 
Edit the following section under the providerSpec field: apiVersion: machine.openshift.io/v1beta1 kind: MachineSet ... spec: template: spec: providerSpec: value: shieldedInstanceConfig: 1 integrityMonitoring: Enabled 2 secureBoot: Disabled 3 virtualizedTrustedPlatformModule: Enabled 4 ... 1 In this section, specify any Shielded VM options that you want. 2 Specify whether integrity monitoring is enabled. Valid values are Disabled or Enabled . Note When integrity monitoring is enabled, you must not disable virtual trusted platform module (vTPM). 3 Specify whether UEFI Secure Boot is enabled. Valid values are Disabled or Enabled . 4 Specify whether vTPM is enabled. Valid values are Disabled or Enabled . Verification Using the Google Cloud console, review the details for a machine deployed by the machine set and verify that the Shielded VM options match the values that you configured. Additional resources What is Shielded VM? Secure Boot Virtual Trusted Platform Module (vTPM) Integrity monitoring 2.5.8. Enabling customer-managed encryption keys for a machine set Google Cloud Platform (GCP) Compute Engine allows users to supply an encryption key to encrypt data on disks at rest. The key is used to encrypt the data encryption key, not to encrypt the customer's data. By default, Compute Engine encrypts this data by using Compute Engine keys. You can enable encryption with a customer-managed key in clusters that use the Machine API. You must first create a KMS key and assign the correct permissions to a service account. The KMS key name, key ring name, and location are required to allow a service account to use your key. Note If you do not want to use a dedicated service account for the KMS encryption, the Compute Engine default service account is used instead. You must grant the default service account permission to access the keys if you do not use a dedicated service account. The Compute Engine default service account name follows the service-<project_number>@compute-system.iam.gserviceaccount.com pattern. Procedure To allow a specific service account to use your KMS key and to grant the service account the correct IAM role, run the following command with your KMS key name, key ring name, and location: USD gcloud kms keys add-iam-policy-binding <key_name> \ --keyring <key_ring_name> \ --location <key_ring_location> \ --member "serviceAccount:service-<project_number>@compute-system.iam.gserviceaccount.com" \ --role roles/cloudkms.cryptoKeyEncrypterDecrypter Configure the encryption key under the providerSpec field in your machine set YAML file. For example: apiVersion: machine.openshift.io/v1beta1 kind: MachineSet ... spec: template: spec: providerSpec: value: disks: - type: encryptionKey: kmsKey: name: machine-encryption-key 1 keyRing: openshift-encryption-ring 2 location: global 3 projectID: openshift-gcp-project 4 kmsKeyServiceAccount: openshift-service-account@openshift-gcp-project.iam.gserviceaccount.com 5 1 The name of the customer-managed encryption key that is used for the disk encryption. 2 The name of the KMS key ring that the KMS key belongs to. 3 The GCP location in which the KMS key ring exists. 4 Optional: The ID of the project in which the KMS key ring exists. If a project ID is not set, the machine set projectID in which the machine set was created is used. 5 Optional: The service account that is used for the encryption request for the given KMS key. If a service account is not set, the Compute Engine default service account is used. 
When a new machine is created by using the updated providerSpec object configuration, the disk encryption key is encrypted with the KMS key. 2.5.9. Enabling GPU support for a compute machine set Google Cloud Platform (GCP) Compute Engine enables users to add GPUs to VM instances. Workloads that benefit from access to GPU resources can perform better on compute machines with this feature enabled. OpenShift Container Platform on GCP supports NVIDIA GPU models in the A2 and N1 machine series. Table 2.1. Supported GPU configurations Model name GPU type Machine types [1] NVIDIA A100 nvidia-tesla-a100 a2-highgpu-1g a2-highgpu-2g a2-highgpu-4g a2-highgpu-8g a2-megagpu-16g NVIDIA K80 nvidia-tesla-k80 n1-standard-1 n1-standard-2 n1-standard-4 n1-standard-8 n1-standard-16 n1-standard-32 n1-standard-64 n1-standard-96 n1-highmem-2 n1-highmem-4 n1-highmem-8 n1-highmem-16 n1-highmem-32 n1-highmem-64 n1-highmem-96 n1-highcpu-2 n1-highcpu-4 n1-highcpu-8 n1-highcpu-16 n1-highcpu-32 n1-highcpu-64 n1-highcpu-96 NVIDIA P100 nvidia-tesla-p100 NVIDIA P4 nvidia-tesla-p4 NVIDIA T4 nvidia-tesla-t4 NVIDIA V100 nvidia-tesla-v100 For more information about machine types, including specifications, compatibility, regional availability, and limitations, see the GCP Compute Engine documentation about N1 machine series , A2 machine series , and GPU regions and zones availability . You can define which supported GPU to use for an instance by using the Machine API. You can configure machines in the N1 machine series to deploy with one of the supported GPU types. Machines in the A2 machine series come with associated GPUs, and cannot use guest accelerators. Note GPUs for graphics workloads are not supported. Procedure In a text editor, open the YAML file for an existing compute machine set or create a new one. Specify a GPU configuration under the providerSpec field in your compute machine set YAML file. See the following examples of valid configurations: Example configuration for the A2 machine series: providerSpec: value: machineType: a2-highgpu-1g 1 onHostMaintenance: Terminate 2 restartPolicy: Always 3 1 Specify the machine type. Ensure that the machine type is included in the A2 machine series. 2 When using GPU support, you must set onHostMaintenance to Terminate . 3 Specify the restart policy for machines deployed by the compute machine set. Allowed values are Always or Never . Example configuration for the N1 machine series: providerSpec: value: gpus: - count: 1 1 type: nvidia-tesla-p100 2 machineType: n1-standard-1 3 onHostMaintenance: Terminate 4 restartPolicy: Always 5 1 Specify the number of GPUs to attach to the machine. 2 Specify the type of GPUs to attach to the machine. Ensure that the machine type and GPU type are compatible. 3 Specify the machine type. Ensure that the machine type and GPU type are compatible. 4 When using GPU support, you must set onHostMaintenance to Terminate . 5 Specify the restart policy for machines deployed by the compute machine set. Allowed values are Always or Never . 2.5.10. Adding a GPU node to an existing OpenShift Container Platform cluster You can copy and modify a default compute machine set configuration to create a GPU-enabled machine set and machines for the GCP cloud provider. The following table lists the validated instance types: Instance type NVIDIA GPU accelerator Maximum number of GPUs Architecture a2-highgpu-1g A100 1 x86 n1-standard-4 T4 1 x86 Procedure Make a copy of an existing MachineSet . 
In the new copy, change the machine set name in metadata.name and in both instances of machine.openshift.io/cluster-api-machineset . Change the instance type to add the following two lines to the newly copied MachineSet : Example a2-highgpu-1g.json file { "apiVersion": "machine.openshift.io/v1beta1", "kind": "MachineSet", "metadata": { "annotations": { "machine.openshift.io/GPU": "0", "machine.openshift.io/memoryMb": "16384", "machine.openshift.io/vCPU": "4" }, "creationTimestamp": "2023-01-13T17:11:02Z", "generation": 1, "labels": { "machine.openshift.io/cluster-api-cluster": "myclustername-2pt9p" }, "name": "myclustername-2pt9p-worker-gpu-a", "namespace": "openshift-machine-api", "resourceVersion": "20185", "uid": "2daf4712-733e-4399-b4b4-d43cb1ed32bd" }, "spec": { "replicas": 1, "selector": { "matchLabels": { "machine.openshift.io/cluster-api-cluster": "myclustername-2pt9p", "machine.openshift.io/cluster-api-machineset": "myclustername-2pt9p-worker-gpu-a" } }, "template": { "metadata": { "labels": { "machine.openshift.io/cluster-api-cluster": "myclustername-2pt9p", "machine.openshift.io/cluster-api-machine-role": "worker", "machine.openshift.io/cluster-api-machine-type": "worker", "machine.openshift.io/cluster-api-machineset": "myclustername-2pt9p-worker-gpu-a" } }, "spec": { "lifecycleHooks": {}, "metadata": {}, "providerSpec": { "value": { "apiVersion": "machine.openshift.io/v1beta1", "canIPForward": false, "credentialsSecret": { "name": "gcp-cloud-credentials" }, "deletionProtection": false, "disks": [ { "autoDelete": true, "boot": true, "image": "projects/rhcos-cloud/global/images/rhcos-412-86-202212081411-0-gcp-x86-64", "labels": null, "sizeGb": 128, "type": "pd-ssd" } ], "kind": "GCPMachineProviderSpec", "machineType": "a2-highgpu-1g", "onHostMaintenance": "Terminate", "metadata": { "creationTimestamp": null }, "networkInterfaces": [ { "network": "myclustername-2pt9p-network", "subnetwork": "myclustername-2pt9p-worker-subnet" } ], "preemptible": true, "projectID": "myteam", "region": "us-central1", "serviceAccounts": [ { "email": "[email protected]", "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] } ], "tags": [ "myclustername-2pt9p-worker" ], "userDataSecret": { "name": "worker-user-data" }, "zone": "us-central1-a" } } } } }, "status": { "availableReplicas": 1, "fullyLabeledReplicas": 1, "observedGeneration": 1, "readyReplicas": 1, "replicas": 1 } } View the existing nodes, machines, and machine sets by running the following command. Note that each node is an instance of a machine definition with a specific GCP region and OpenShift Container Platform role. USD oc get nodes Example output NAME STATUS ROLES AGE VERSION myclustername-2pt9p-master-0.c.openshift-qe.internal Ready control-plane,master 8h v1.26.0 myclustername-2pt9p-master-1.c.openshift-qe.internal Ready control-plane,master 8h v1.26.0 myclustername-2pt9p-master-2.c.openshift-qe.internal Ready control-plane,master 8h v1.26.0 myclustername-2pt9p-worker-a-mxtnz.c.openshift-qe.internal Ready worker 8h v1.26.0 myclustername-2pt9p-worker-b-9pzzn.c.openshift-qe.internal Ready worker 8h v1.26.0 myclustername-2pt9p-worker-c-6pbg6.c.openshift-qe.internal Ready worker 8h v1.26.0 myclustername-2pt9p-worker-gpu-a-wxcr6.c.openshift-qe.internal Ready worker 4h35m v1.26.0 View the machines and machine sets that exist in the openshift-machine-api namespace by running the following command. Each compute machine set is associated with a different availability zone within the GCP region. 
The installer automatically load balances compute machines across availability zones. USD oc get machinesets -n openshift-machine-api Example output NAME DESIRED CURRENT READY AVAILABLE AGE myclustername-2pt9p-worker-a 1 1 1 1 8h myclustername-2pt9p-worker-b 1 1 1 1 8h myclustername-2pt9p-worker-c 1 1 8h myclustername-2pt9p-worker-f 0 0 8h View the machines that exist in the openshift-machine-api namespace by running the following command. You can only configure one compute machine per set, although you can scale a compute machine set to add a node in a particular region and zone. USD oc get machines -n openshift-machine-api | grep worker Example output myclustername-2pt9p-worker-a-mxtnz Running n2-standard-4 us-central1 us-central1-a 8h myclustername-2pt9p-worker-b-9pzzn Running n2-standard-4 us-central1 us-central1-b 8h myclustername-2pt9p-worker-c-6pbg6 Running n2-standard-4 us-central1 us-central1-c 8h Make a copy of one of the existing compute MachineSet definitions and output the result to a JSON file by running the following command. This will be the basis for the GPU-enabled compute machine set definition. USD oc get machineset myclustername-2pt9p-worker-a -n openshift-machine-api -o json > <output_file.json> Edit the JSON file to make the following changes to the new MachineSet definition: Rename the machine set by inserting the substring gpu in metadata.name and in both instances of machine.openshift.io/cluster-api-machineset . Change the machineType of the new MachineSet definition to a2-highgpu-1g , which includes an NVIDIA A100 GPU. jq .spec.template.spec.providerSpec.value.machineType ocp_4.13_machineset-a2-highgpu-1g.json "a2-highgpu-1g" The <output_file.json> file is saved as ocp_4.13_machineset-a2-highgpu-1g.json . Update the following fields in ocp_4.13_machineset-a2-highgpu-1g.json : Change .metadata.name to a name containing gpu . Change .spec.selector.matchLabels["machine.openshift.io/cluster-api-machineset"] to match the new .metadata.name . Change .spec.template.metadata.labels["machine.openshift.io/cluster-api-machineset"] to match the new .metadata.name . Change .spec.template.spec.providerSpec.value.machineType to a2-highgpu-1g . Add the following line under machineType : "onHostMaintenance": "Terminate". 
For example: "machineType": "a2-highgpu-1g", "onHostMaintenance": "Terminate", To verify your changes, perform a diff of the original compute definition and the new GPU-enabled node definition by running the following command: USD oc get machineset/myclustername-2pt9p-worker-a -n openshift-machine-api -o json | diff ocp_4.13_machineset-a2-highgpu-1g.json - Example output 15c15 < "name": "myclustername-2pt9p-worker-gpu-a", --- > "name": "myclustername-2pt9p-worker-a", 25c25 < "machine.openshift.io/cluster-api-machineset": "myclustername-2pt9p-worker-gpu-a" --- > "machine.openshift.io/cluster-api-machineset": "myclustername-2pt9p-worker-a" 34c34 < "machine.openshift.io/cluster-api-machineset": "myclustername-2pt9p-worker-gpu-a" --- > "machine.openshift.io/cluster-api-machineset": "myclustername-2pt9p-worker-a" 59,60c59 < "machineType": "a2-highgpu-1g", < "onHostMaintenance": "Terminate", --- > "machineType": "n2-standard-4", Create the GPU-enabled compute machine set from the definition file by running the following command: USD oc create -f ocp_4.13_machineset-a2-highgpu-1g.json Example output machineset.machine.openshift.io/myclustername-2pt9p-worker-gpu-a created Verification View the machine set you created by running the following command: USD oc -n openshift-machine-api get machinesets | grep gpu The MachineSet replica count is set to 1 so a new Machine object is created automatically. Example output myclustername-2pt9p-worker-gpu-a 1 1 1 1 5h24m View the Machine object that the machine set created by running the following command: USD oc -n openshift-machine-api get machines | grep gpu Example output myclustername-2pt9p-worker-gpu-a-wxcr6 Running a2-highgpu-1g us-central1 us-central1-a 5h25m Note Note that there is no need to specify a namespace for the node. The node definition is cluster scoped. 2.5.11. Deploying the Node Feature Discovery Operator After the GPU-enabled node is created, you need to discover the GPU-enabled node so it can be scheduled. To do this, install the Node Feature Discovery (NFD) Operator. The NFD Operator identifies hardware device features in nodes. It solves the general problem of identifying and cataloging hardware resources in the infrastructure nodes so they can be made available to OpenShift Container Platform. Procedure Install the Node Feature Discovery Operator from OperatorHub in the OpenShift Container Platform console. After installing the NFD Operator into OperatorHub , select Node Feature Discovery from the installed Operators list and select Create instance . This installs the nfd-master and nfd-worker pods, one nfd-worker pod for each compute node, in the openshift-nfd namespace. Verify that the Operator is installed and running by running the following command: USD oc get pods -n openshift-nfd Example output NAME READY STATUS RESTARTS AGE nfd-controller-manager-8646fcbb65-x5qgk 2/2 Running 7 (8h ago) 1d Browse to the installed Oerator in the console and select Create Node Feature Discovery . Select Create to build a NFD custom resource. This creates NFD pods in the openshift-nfd namespace that poll the OpenShift Container Platform nodes for hardware resources and catalogue them. 
Verification After a successful build, verify that an NFD pod is running on each node by running the following command: USD oc get pods -n openshift-nfd Example output NAME READY STATUS RESTARTS AGE nfd-controller-manager-8646fcbb65-x5qgk 2/2 Running 7 (8h ago) 12d nfd-master-769656c4cb-w9vrv 1/1 Running 0 12d nfd-worker-qjxb2 1/1 Running 3 (3d14h ago) 12d nfd-worker-xtz9b 1/1 Running 5 (3d14h ago) 12d The NFD Operator uses vendor PCI IDs to identify hardware in a node. NVIDIA uses the PCI ID 10de . View the NVIDIA GPU discovered by the NFD Operator by running the following command: USD oc describe node ip-10-0-132-138.us-east-2.compute.internal | egrep 'Roles|pci' Example output Roles: worker feature.node.kubernetes.io/pci-1013.present=true feature.node.kubernetes.io/pci-10de.present=true feature.node.kubernetes.io/pci-1d0f.present=true 10de appears in the node feature list for the GPU-enabled node. This means that the NFD Operator correctly identified the node from the GPU-enabled MachineSet. 2.6. Creating a compute machine set on IBM Cloud You can create a different compute machine set to serve a specific purpose in your OpenShift Container Platform cluster on IBM Cloud. For example, you might create infrastructure machine sets and related machines so that you can move supporting workloads to the new machines. Important You can use the advanced machine management and scaling capabilities only in clusters where the Machine API is operational. Clusters with user-provisioned infrastructure require additional validation and configuration to use the Machine API. Clusters with the infrastructure platform type none cannot use the Machine API. This limitation applies even if the compute machines that are attached to the cluster are installed on a platform that supports the feature. This parameter cannot be changed after installation. To view the platform type for your cluster, run the following command: USD oc get infrastructure cluster -o jsonpath='{.status.platform}' 2.6.1. Sample YAML for a compute machine set custom resource on IBM Cloud This sample YAML defines a compute machine set that runs in a specified IBM Cloud zone in a region and creates nodes that are labeled with node-role.kubernetes.io/<role>: "" . In this sample, <infrastructure_id> is the infrastructure ID label that is based on the cluster ID that you set when you provisioned the cluster, and <role> is the node label to add. 
apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 machine.openshift.io/cluster-api-machine-role: <role> 2 machine.openshift.io/cluster-api-machine-type: <role> 3 name: <infrastructure_id>-<role>-<region> 4 namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 5 machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<region> 6 template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 7 machine.openshift.io/cluster-api-machine-role: <role> 8 machine.openshift.io/cluster-api-machine-type: <role> 9 machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<region> 10 spec: metadata: labels: node-role.kubernetes.io/<role>: "" providerSpec: value: apiVersion: ibmcloudproviderconfig.openshift.io/v1beta1 credentialsSecret: name: ibmcloud-credentials image: <infrastructure_id>-rhcos 11 kind: IBMCloudMachineProviderSpec primaryNetworkInterface: securityGroups: - <infrastructure_id>-sg-cluster-wide - <infrastructure_id>-sg-openshift-net subnet: <infrastructure_id>-subnet-compute-<zone> 12 profile: <instance_profile> 13 region: <region> 14 resourceGroup: <resource_group> 15 userDataSecret: name: <role>-user-data 16 vpc: <vpc_name> 17 zone: <zone> 18 1 5 7 The infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. If you have the OpenShift CLI installed, you can obtain the infrastructure ID by running the following command: USD oc get -o jsonpath='{.status.infrastructureName}{"\n"}' infrastructure cluster 2 3 8 9 16 The node label to add. 4 6 10 The infrastructure ID, node label, and region. 11 The custom Red Hat Enterprise Linux CoreOS (RHCOS) image that was used for cluster installation. 12 The infrastructure ID and zone within your region to place machines on. Be sure that your region supports the zone that you specify. 13 Specify the IBM Cloud instance profile . 14 Specify the region to place machines on. 15 The resource group that machine resources are placed in. This is either an existing resource group specified at installation time, or an installer-created resource group named based on the infrastructure ID. 17 The VPC name. 18 Specify the zone within your region to place machines on. Be sure that your region supports the zone that you specify. 2.6.2. Creating a compute machine set In addition to the compute machine sets created by the installation program, you can create your own to dynamically manage the machine compute resources for specific workloads of your choice. Prerequisites Deploy an OpenShift Container Platform cluster. Install the OpenShift CLI ( oc ). Log in to oc as a user with cluster-admin permission. Procedure Create a new YAML file that contains the compute machine set custom resource (CR) sample and is named <file_name>.yaml . Ensure that you set the <clusterID> and <role> parameter values. Optional: If you are not sure which value to set for a specific field, you can check an existing compute machine set from your cluster. 
To list the compute machine sets in your cluster, run the following command: USD oc get machinesets -n openshift-machine-api Example output NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m To view values of a specific compute machine set custom resource (CR), run the following command: USD oc get machineset <machineset_name> \ -n openshift-machine-api -o yaml Example output apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 name: <infrastructure_id>-<role> 2 namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machine-role: <role> machine.openshift.io/cluster-api-machine-type: <role> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> spec: providerSpec: 3 ... 1 The cluster infrastructure ID. 2 A default node label. Note For clusters that have user-provisioned infrastructure, a compute machine set can only create worker and infra type machines. 3 The values in the <providerSpec> section of the compute machine set CR are platform-specific. For more information about <providerSpec> parameters in the CR, see the sample compute machine set CR configuration for your provider. Create a MachineSet CR by running the following command: USD oc create -f <file_name>.yaml Verification View the list of compute machine sets by running the following command: USD oc get machineset -n openshift-machine-api Example output NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-infra-us-east-1a 1 1 1 1 11m agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m When the new compute machine set is available, the DESIRED and CURRENT values match. If the compute machine set is not available, wait a few minutes and run the command again. 2.6.3. Labeling GPU machine sets for the cluster autoscaler You can use a machine set label to indicate which machines the cluster autoscaler can use to deploy GPU-enabled nodes. Prerequisites Your cluster uses a cluster autoscaler. Procedure On the machine set that you want to create machines for the cluster autoscaler to use to deploy GPU-enabled nodes, add a cluster-api/accelerator label: apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: name: machine-set-name spec: template: spec: metadata: labels: cluster-api/accelerator: nvidia-t4 1 1 Specify a label of your choice that consists of alphanumeric characters, - , _ , or . and starts and ends with an alphanumeric character. For example, you might use nvidia-t4 to represent Nvidia T4 GPUs, or nvidia-a10g for A10G GPUs. Note You must specify the value of this label for the spec.resourceLimits.gpus.type parameter in your ClusterAutoscaler CR. For more information, see "Cluster autoscaler resource definition". Additional resources Cluster autoscaler resource definition 2.7. 
Creating a compute machine set on IBM Power Virtual Server You can create a different compute machine set to serve a specific purpose in your OpenShift Container Platform cluster on IBM Power Virtual Server. For example, you might create infrastructure machine sets and related machines so that you can move supporting workloads to the new machines. Important You can use the advanced machine management and scaling capabilities only in clusters where the Machine API is operational. Clusters with user-provisioned infrastructure require additional validation and configuration to use the Machine API. Clusters with the infrastructure platform type none cannot use the Machine API. This limitation applies even if the compute machines that are attached to the cluster are installed on a platform that supports the feature. This parameter cannot be changed after installation. To view the platform type for your cluster, run the following command: USD oc get infrastructure cluster -o jsonpath='{.status.platform}' 2.7.1. Sample YAML for a compute machine set custom resource on IBM Power Virtual Server This sample YAML file defines a compute machine set that runs in a specified IBM Power Virtual Server zone in a region and creates nodes that are labeled with node-role.kubernetes.io/<role>: "" . In this sample, <infrastructure_id> is the infrastructure ID label that is based on the cluster ID that you set when you provisioned the cluster, and <role> is the node label to add. apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 machine.openshift.io/cluster-api-machine-role: <role> 2 machine.openshift.io/cluster-api-machine-type: <role> 3 name: <infrastructure_id>-<role>-<region> 4 namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 5 machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<region> 6 template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 7 machine.openshift.io/cluster-api-machine-role: <role> 8 machine.openshift.io/cluster-api-machine-type: <role> 9 machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<region> 10 spec: metadata: labels: node-role.kubernetes.io/<role>: "" providerSpec: value: apiVersion: machine.openshift.io/v1 credentialsSecret: name: powervs-credentials image: name: rhcos-<infrastructure_id> 11 type: Name keyPairName: <infrastructure_id>-key kind: PowerVSMachineProviderConfig memoryGiB: 32 network: regex: ^DHCPSERVER[0-9a-z]{32}_PrivateUSD type: RegEx processorType: Shared processors: "0.5" serviceInstance: id: <ibm_power_vs_service_instance_id> type: ID 12 systemType: s922 userDataSecret: name: <role>-user-data 1 5 7 The infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. If you have the OpenShift CLI installed, you can obtain the infrastructure ID by running the following command: USD oc get -o jsonpath='{.status.infrastructureName}{"\n"}' infrastructure cluster 2 3 8 9 The node label to add. 4 6 10 The infrastructure ID, node label, and region. 11 The custom Red Hat Enterprise Linux CoreOS (RHCOS) image that was used for cluster installation. 12 The IBM Power Virtual Server service instance in which machines are created, specified by its ID. 2.7.2. 
Creating a compute machine set In addition to the compute machine sets created by the installation program, you can create your own to dynamically manage the machine compute resources for specific workloads of your choice. Prerequisites Deploy an OpenShift Container Platform cluster. Install the OpenShift CLI ( oc ). Log in to oc as a user with cluster-admin permission. Procedure Create a new YAML file that contains the compute machine set custom resource (CR) sample and is named <file_name>.yaml . Ensure that you set the <clusterID> and <role> parameter values. Optional: If you are not sure which value to set for a specific field, you can check an existing compute machine set from your cluster. To list the compute machine sets in your cluster, run the following command: USD oc get machinesets -n openshift-machine-api Example output NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m To view values of a specific compute machine set custom resource (CR), run the following command: USD oc get machineset <machineset_name> \ -n openshift-machine-api -o yaml Example output apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 name: <infrastructure_id>-<role> 2 namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machine-role: <role> machine.openshift.io/cluster-api-machine-type: <role> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> spec: providerSpec: 3 ... 1 The cluster infrastructure ID. 2 A default node label. Note For clusters that have user-provisioned infrastructure, a compute machine set can only create worker and infra type machines. 3 The values in the <providerSpec> section of the compute machine set CR are platform-specific. For more information about <providerSpec> parameters in the CR, see the sample compute machine set CR configuration for your provider. Create a MachineSet CR by running the following command: USD oc create -f <file_name>.yaml Verification View the list of compute machine sets by running the following command: USD oc get machineset -n openshift-machine-api Example output NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-infra-us-east-1a 1 1 1 1 11m agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m When the new compute machine set is available, the DESIRED and CURRENT values match. If the compute machine set is not available, wait a few minutes and run the command again. 2.7.3. Labeling GPU machine sets for the cluster autoscaler You can use a machine set label to indicate which machines the cluster autoscaler can use to deploy GPU-enabled nodes. Prerequisites Your cluster uses a cluster autoscaler. 
Procedure On the machine set that you want to create machines for the cluster autoscaler to use to deploy GPU-enabled nodes, add a cluster-api/accelerator label: apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: name: machine-set-name spec: template: spec: metadata: labels: cluster-api/accelerator: nvidia-t4 1 1 Specify a label of your choice that consists of alphanumeric characters, - , _ , or . and starts and ends with an alphanumeric character. For example, you might use nvidia-t4 to represent Nvidia T4 GPUs, or nvidia-a10g for A10G GPUs. Note You must specify the value of this label for the spec.resourceLimits.gpus.type parameter in your ClusterAutoscaler CR. For more information, see "Cluster autoscaler resource definition". Additional resources Cluster autoscaler resource definition 2.8. Creating a compute machine set on Nutanix You can create a different compute machine set to serve a specific purpose in your OpenShift Container Platform cluster on Nutanix. For example, you might create infrastructure machine sets and related machines so that you can move supporting workloads to the new machines. Important You can use the advanced machine management and scaling capabilities only in clusters where the Machine API is operational. Clusters with user-provisioned infrastructure require additional validation and configuration to use the Machine API. Clusters with the infrastructure platform type none cannot use the Machine API. This limitation applies even if the compute machines that are attached to the cluster are installed on a platform that supports the feature. This parameter cannot be changed after installation. To view the platform type for your cluster, run the following command: USD oc get infrastructure cluster -o jsonpath='{.status.platform}' 2.8.1. Sample YAML for a compute machine set custom resource on Nutanix This sample YAML defines a Nutanix compute machine set that creates nodes that are labeled with node-role.kubernetes.io/<role>: "" . In this sample, <infrastructure_id> is the infrastructure ID label that is based on the cluster ID that you set when you provisioned the cluster, and <role> is the node label to add. Values obtained by using the OpenShift CLI In the following example, you can obtain some of the values for your cluster by using the OpenShift CLI ( oc ). Infrastructure ID The <infrastructure_id> string is the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. 
If you have the OpenShift CLI installed, you can obtain the infrastructure ID by running the following command: USD oc get -o jsonpath='{.status.infrastructureName}{"\n"}' infrastructure cluster apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 machine.openshift.io/cluster-api-machine-role: <role> 2 machine.openshift.io/cluster-api-machine-type: <role> name: <infrastructure_id>-<role>-<zone> 3 namespace: openshift-machine-api annotations: 4 machine.openshift.io/memoryMb: "16384" machine.openshift.io/vCPU: "4" spec: replicas: 3 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<zone> template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machine-role: <role> machine.openshift.io/cluster-api-machine-type: <role> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<zone> spec: metadata: labels: node-role.kubernetes.io/<role>: "" providerSpec: value: apiVersion: machine.openshift.io/v1 bootType: "" 5 categories: 6 - key: <category_name> value: <category_value> cluster: 7 type: uuid uuid: <cluster_uuid> credentialsSecret: name: nutanix-credentials image: name: <infrastructure_id>-rhcos 8 type: name kind: NutanixMachineProviderConfig memorySize: 16Gi 9 project: 10 type: name name: <project_name> subnets: - type: uuid uuid: <subnet_uuid> systemDiskSize: 120Gi 11 userDataSecret: name: <user_data_secret> 12 vcpuSockets: 4 13 vcpusPerSocket: 1 14 1 For <infrastructure_id> , specify the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. 2 Specify the node label to add. 3 Specify the infrastructure ID, node label, and zone. 4 Annotations for the cluster autoscaler. 5 Specifies the boot type that the compute machines use. For more information about boot types, see Understanding UEFI, Secure Boot, and TPM in the Virtualized Environment . Valid values are Legacy , SecureBoot , or UEFI . The default is Legacy . Note You must use the Legacy boot type in OpenShift Container Platform 4.13. 6 Specify one or more Nutanix Prism categories to apply to compute machines. This stanza requires key and value parameters for a category key-value pair that exists in Prism Central. For more information about categories, see Category management . 7 Specify a Nutanix Prism Element cluster configuration. In this example, the cluster type is uuid , so there is a uuid stanza. 8 Specify the image to use. Use an image from an existing default compute machine set for the cluster. 9 Specify the amount of memory for the cluster in Gi. 10 Specify the Nutanix project that you use for your cluster. In this example, the project type is name , so there is a name stanza. 11 Specify the size of the system disk in Gi. 12 Specify the name of the secret in the user data YAML file that is in the openshift-machine-api namespace. Use the value that installation program populates in the default compute machine set. 13 Specify the number of vCPU sockets. 14 Specify the number of vCPUs per socket. 2.8.2. Creating a compute machine set In addition to the compute machine sets created by the installation program, you can create your own to dynamically manage the machine compute resources for specific workloads of your choice. Prerequisites Deploy an OpenShift Container Platform cluster. Install the OpenShift CLI ( oc ). 
Log in to oc as a user with cluster-admin permission. Procedure Create a new YAML file that contains the compute machine set custom resource (CR) sample and is named <file_name>.yaml . Ensure that you set the <clusterID> and <role> parameter values. Optional: If you are not sure which value to set for a specific field, you can check an existing compute machine set from your cluster. To list the compute machine sets in your cluster, run the following command: USD oc get machinesets -n openshift-machine-api Example output NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m To view values of a specific compute machine set custom resource (CR), run the following command: USD oc get machineset <machineset_name> \ -n openshift-machine-api -o yaml Example output apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 name: <infrastructure_id>-<role> 2 namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machine-role: <role> machine.openshift.io/cluster-api-machine-type: <role> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> spec: providerSpec: 3 ... 1 The cluster infrastructure ID. 2 A default node label. Note For clusters that have user-provisioned infrastructure, a compute machine set can only create worker and infra type machines. 3 The values in the <providerSpec> section of the compute machine set CR are platform-specific. For more information about <providerSpec> parameters in the CR, see the sample compute machine set CR configuration for your provider. Create a MachineSet CR by running the following command: USD oc create -f <file_name>.yaml Verification View the list of compute machine sets by running the following command: USD oc get machineset -n openshift-machine-api Example output NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-infra-us-east-1a 1 1 1 1 11m agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m When the new compute machine set is available, the DESIRED and CURRENT values match. If the compute machine set is not available, wait a few minutes and run the command again. 2.8.3. Labeling GPU machine sets for the cluster autoscaler You can use a machine set label to indicate which machines the cluster autoscaler can use to deploy GPU-enabled nodes. Prerequisites Your cluster uses a cluster autoscaler. Procedure On the machine set that you want to create machines for the cluster autoscaler to use to deploy GPU-enabled nodes, add a cluster-api/accelerator label: apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: name: machine-set-name spec: template: spec: metadata: labels: cluster-api/accelerator: nvidia-t4 1 1 Specify a label of your choice that consists of alphanumeric characters, - , _ , or . 
and starts and ends with an alphanumeric character. For example, you might use nvidia-t4 to represent Nvidia T4 GPUs, or nvidia-a10g for A10G GPUs. Note You must specify the value of this label for the spec.resourceLimits.gpus.type parameter in your ClusterAutoscaler CR. For more information, see "Cluster autoscaler resource definition". Additional resources Cluster autoscaler resource definition 2.9. Creating a compute machine set on OpenStack You can create a different compute machine set to serve a specific purpose in your OpenShift Container Platform cluster on Red Hat OpenStack Platform (RHOSP). For example, you might create infrastructure machine sets and related machines so that you can move supporting workloads to the new machines. Important You can use the advanced machine management and scaling capabilities only in clusters where the Machine API is operational. Clusters with user-provisioned infrastructure require additional validation and configuration to use the Machine API. Clusters with the infrastructure platform type none cannot use the Machine API. This limitation applies even if the compute machines that are attached to the cluster are installed on a platform that supports the feature. This parameter cannot be changed after installation. To view the platform type for your cluster, run the following command: USD oc get infrastructure cluster -o jsonpath='{.status.platform}' 2.9.1. Sample YAML for a compute machine set custom resource on RHOSP This sample YAML defines a compute machine set that runs on Red Hat OpenStack Platform (RHOSP) and creates nodes that are labeled with node-role.kubernetes.io/<role>: "" . In this sample, <infrastructure_id> is the infrastructure ID label that is based on the cluster ID that you set when you provisioned the cluster, and <role> is the node label to add. 
apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 machine.openshift.io/cluster-api-machine-role: <role> 2 machine.openshift.io/cluster-api-machine-type: <role> 3 name: <infrastructure_id>-<role> 4 namespace: openshift-machine-api spec: replicas: <number_of_replicas> selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 5 machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> 6 template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 7 machine.openshift.io/cluster-api-machine-role: <role> 8 machine.openshift.io/cluster-api-machine-type: <role> 9 machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> 10 spec: providerSpec: value: apiVersion: openstackproviderconfig.openshift.io/v1alpha1 cloudName: openstack cloudsSecret: name: openstack-cloud-credentials namespace: openshift-machine-api flavor: <nova_flavor> image: <glance_image_name_or_location> serverGroupID: <optional_UUID_of_server_group> 11 kind: OpenstackProviderSpec networks: 12 - filter: {} subnets: - filter: name: <subnet_name> tags: openshiftClusterID=<infrastructure_id> 13 primarySubnet: <rhosp_subnet_UUID> 14 securityGroups: - filter: {} name: <infrastructure_id>-worker 15 serverMetadata: Name: <infrastructure_id>-worker 16 openshiftClusterID: <infrastructure_id> 17 tags: - openshiftClusterID=<infrastructure_id> 18 trunk: true userDataSecret: name: worker-user-data 19 availabilityZone: <optional_openstack_availability_zone> 1 5 7 13 15 16 17 18 Specify the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. If you have the OpenShift CLI installed, you can obtain the infrastructure ID by running the following command: USD oc get -o jsonpath='{.status.infrastructureName}{"\n"}' infrastructure cluster 2 3 8 9 19 Specify the node label to add. 4 6 10 Specify the infrastructure ID and node label. 11 To set a server group policy for the MachineSet, enter the value that is returned from creating a server group . For most deployments, anti-affinity or soft-anti-affinity policies are recommended. 12 Required for deployments to multiple networks. To specify multiple networks, add another entry in the networks array. Also, you must include the network that is used as the primarySubnet value. 14 Specify the RHOSP subnet that you want the endpoints of nodes to be published on. Usually, this is the same subnet that is used as the value of machinesSubnet in the install-config.yaml file. 2.9.2. Sample YAML for a compute machine set custom resource that uses SR-IOV on RHOSP If you configured your cluster for single-root I/O virtualization (SR-IOV), you can create compute machine sets that use that technology. This sample YAML defines a compute machine set that uses SR-IOV networks. The nodes that it creates are labeled with node-role.openshift.io/<node_role>: "" In this sample, infrastructure_id is the infrastructure ID label that is based on the cluster ID that you set when you provisioned the cluster, and node_role is the node label to add. The sample assumes two SR-IOV networks that are named "radio" and "uplink". The networks are used in port definitions in the spec.template.spec.providerSpec.value.ports list. Note Only parameters that are specific to SR-IOV deployments are described in this sample. To review a more general sample, see "Sample YAML for a compute machine set custom resource on RHOSP". 
An example compute machine set that uses SR-IOV networks apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machine-role: <node_role> machine.openshift.io/cluster-api-machine-type: <node_role> name: <infrastructure_id>-<node_role> namespace: openshift-machine-api spec: replicas: <number_of_replicas> selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<node_role> template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machine-role: <node_role> machine.openshift.io/cluster-api-machine-type: <node_role> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<node_role> spec: metadata: providerSpec: value: apiVersion: openstackproviderconfig.openshift.io/v1alpha1 cloudName: openstack cloudsSecret: name: openstack-cloud-credentials namespace: openshift-machine-api flavor: <nova_flavor> image: <glance_image_name_or_location> serverGroupID: <optional_UUID_of_server_group> kind: OpenstackProviderSpec networks: - subnets: - UUID: <machines_subnet_UUID> ports: - networkID: <radio_network_UUID> 1 nameSuffix: radio fixedIPs: - subnetID: <radio_subnet_UUID> 2 tags: - sriov - radio vnicType: direct 3 portSecurity: false 4 - networkID: <uplink_network_UUID> 5 nameSuffix: uplink fixedIPs: - subnetID: <uplink_subnet_UUID> 6 tags: - sriov - uplink vnicType: direct 7 portSecurity: false 8 primarySubnet: <machines_subnet_UUID> securityGroups: - filter: {} name: <infrastructure_id>-<node_role> serverMetadata: Name: <infrastructure_id>-<node_role> openshiftClusterID: <infrastructure_id> tags: - openshiftClusterID=<infrastructure_id> trunk: true userDataSecret: name: <node_role>-user-data availabilityZone: <optional_openstack_availability_zone> 1 5 Enter a network UUID for each port. 2 6 Enter a subnet UUID for each port. 3 7 The value of the vnicType parameter must be direct for each port. 4 8 The value of the portSecurity parameter must be false for each port. You cannot set security groups and allowed address pairs for ports when port security is disabled. Setting security groups on the instance applies the groups to all ports that are attached to it. Important After you deploy compute machines that are SR-IOV-capable, you must label them as such. For example, from a command line, enter: USD oc label node <NODE_NAME> feature.node.kubernetes.io/network-sriov.capable="true" Note Trunking is enabled for ports that are created by entries in the networks and subnets lists. The names of ports that are created from these lists follow the pattern <machine_name>-<nameSuffix> . The nameSuffix field is required in port definitions. You can enable trunking for each port. Optionally, you can add tags to ports as part of their tags lists. Additional resources Preparing to install a cluster that uses SR-IOV or OVS-DPDK on OpenStack 2.9.3. Sample YAML for SR-IOV deployments where port security is disabled To create single-root I/O virtualization (SR-IOV) ports on a network that has port security disabled, define a compute machine set that includes the ports as items in the spec.template.spec.providerSpec.value.ports list. 
This difference from the standard SR-IOV compute machine set is due to the automatic security group and allowed address pair configuration that occurs for ports that are created by using the network and subnet interfaces. Ports that you define for machines subnets require: Allowed address pairs for the API and ingress virtual IP ports The compute security group Attachment to the machines network and subnet Note Only parameters that are specific to SR-IOV deployments where port security is disabled are described in this sample. To review a more general sample, see Sample YAML for a compute machine set custom resource that uses SR-IOV on RHOSP". An example compute machine set that uses SR-IOV networks and has port security disabled apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machine-role: <node_role> machine.openshift.io/cluster-api-machine-type: <node_role> name: <infrastructure_id>-<node_role> namespace: openshift-machine-api spec: replicas: <number_of_replicas> selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<node_role> template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machine-role: <node_role> machine.openshift.io/cluster-api-machine-type: <node_role> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<node_role> spec: metadata: {} providerSpec: value: apiVersion: openstackproviderconfig.openshift.io/v1alpha1 cloudName: openstack cloudsSecret: name: openstack-cloud-credentials namespace: openshift-machine-api flavor: <nova_flavor> image: <glance_image_name_or_location> kind: OpenstackProviderSpec ports: - allowedAddressPairs: 1 - ipAddress: <API_VIP_port_IP> - ipAddress: <ingress_VIP_port_IP> fixedIPs: - subnetID: <machines_subnet_UUID> 2 nameSuffix: nodes networkID: <machines_network_UUID> 3 securityGroups: - <compute_security_group_UUID> 4 - networkID: <SRIOV_network_UUID> nameSuffix: sriov fixedIPs: - subnetID: <SRIOV_subnet_UUID> tags: - sriov vnicType: direct portSecurity: False primarySubnet: <machines_subnet_UUID> serverMetadata: Name: <infrastructure_ID>-<node_role> openshiftClusterID: <infrastructure_id> tags: - openshiftClusterID=<infrastructure_id> trunk: false userDataSecret: name: worker-user-data 1 Specify allowed address pairs for the API and ingress ports. 2 3 Specify the machines network and subnet. 4 Specify the compute machines security group. Note Trunking is enabled for ports that are created by entries in the networks and subnets lists. The names of ports that are created from these lists follow the pattern <machine_name>-<nameSuffix> . The nameSuffix field is required in port definitions. You can enable trunking for each port. Optionally, you can add tags to ports as part of their tags lists. If your cluster uses Kuryr and the RHOSP SR-IOV network has port security disabled, the primary port for compute machines must have: The value of the spec.template.spec.providerSpec.value.networks.portSecurityEnabled parameter set to false . For each subnet, the value of the spec.template.spec.providerSpec.value.networks.subnets.portSecurityEnabled parameter set to false . The value of spec.template.spec.providerSpec.value.securityGroups set to empty: [] . 
An example section of a compute machine set for a cluster on Kuryr that uses SR-IOV and has port security disabled ... networks: - subnets: - uuid: <machines_subnet_UUID> portSecurityEnabled: false portSecurityEnabled: false securityGroups: [] ... In that case, you can apply the compute security group to the primary VM interface after the VM is created. For example, from a command line: USD openstack port set --enable-port-security --security-group <infrastructure_id>-<node_role> <main_port_ID> Important After you deploy compute machines that are SR-IOV-capable, you must label them as such. For example, from a command line, enter: USD oc label node <NODE_NAME> feature.node.kubernetes.io/network-sriov.capable="true" 2.9.4. Creating a compute machine set In addition to the compute machine sets created by the installation program, you can create your own to dynamically manage the machine compute resources for specific workloads of your choice. Prerequisites Deploy an OpenShift Container Platform cluster. Install the OpenShift CLI ( oc ). Log in to oc as a user with cluster-admin permission. Procedure Create a new YAML file that contains the compute machine set custom resource (CR) sample and is named <file_name>.yaml . Ensure that you set the <clusterID> and <role> parameter values. Optional: If you are not sure which value to set for a specific field, you can check an existing compute machine set from your cluster. To list the compute machine sets in your cluster, run the following command: USD oc get machinesets -n openshift-machine-api Example output NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m To view values of a specific compute machine set custom resource (CR), run the following command: USD oc get machineset <machineset_name> \ -n openshift-machine-api -o yaml Example output apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 name: <infrastructure_id>-<role> 2 namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machine-role: <role> machine.openshift.io/cluster-api-machine-type: <role> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> spec: providerSpec: 3 ... 1 The cluster infrastructure ID. 2 A default node label. Note For clusters that have user-provisioned infrastructure, a compute machine set can only create worker and infra type machines. 3 The values in the <providerSpec> section of the compute machine set CR are platform-specific. For more information about <providerSpec> parameters in the CR, see the sample compute machine set CR configuration for your provider. 
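Optional: Before you create the CR, you can substitute the <infrastructure_id> placeholder in your file from the command line instead of editing it manually. The following is a minimal sketch that combines the oc command shown earlier in this section with sed; the INFRA_ID variable name is illustrative, and <file_name>.yaml is the file from the previous step: $ INFRA_ID=$(oc get -o jsonpath='{.status.infrastructureName}' infrastructure cluster) $ sed -i "s/<infrastructure_id>/${INFRA_ID}/g" <file_name>.yaml Review the file afterward to confirm that the machine.openshift.io/cluster-api-cluster labels and resource names contain the expected value.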
Create a MachineSet CR by running the following command: USD oc create -f <file_name>.yaml Verification View the list of compute machine sets by running the following command: USD oc get machineset -n openshift-machine-api Example output NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-infra-us-east-1a 1 1 1 1 11m agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m When the new compute machine set is available, the DESIRED and CURRENT values match. If the compute machine set is not available, wait a few minutes and run the command again. 2.9.5. Labeling GPU machine sets for the cluster autoscaler You can use a machine set label to indicate which machines the cluster autoscaler can use to deploy GPU-enabled nodes. Prerequisites Your cluster uses a cluster autoscaler. Procedure On the machine set that you want to create machines for the cluster autoscaler to use to deploy GPU-enabled nodes, add a cluster-api/accelerator label: apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: name: machine-set-name spec: template: spec: metadata: labels: cluster-api/accelerator: nvidia-t4 1 1 Specify a label of your choice that consists of alphanumeric characters, - , _ , or . and starts and ends with an alphanumeric character. For example, you might use nvidia-t4 to represent Nvidia T4 GPUs, or nvidia-a10g for A10G GPUs. Note You must specify the value of this label for the spec.resourceLimits.gpus.type parameter in your ClusterAutoscaler CR. For more information, see "Cluster autoscaler resource definition". Additional resources Cluster autoscaler resource definition 2.10. Creating a compute machine set on RHV You can create a different compute machine set to serve a specific purpose in your OpenShift Container Platform cluster on Red Hat Virtualization (RHV). For example, you might create infrastructure machine sets and related machines so that you can move supporting workloads to the new machines. Important You can use the advanced machine management and scaling capabilities only in clusters where the Machine API is operational. Clusters with user-provisioned infrastructure require additional validation and configuration to use the Machine API. Clusters with the infrastructure platform type none cannot use the Machine API. This limitation applies even if the compute machines that are attached to the cluster are installed on a platform that supports the feature. This parameter cannot be changed after installation. To view the platform type for your cluster, run the following command: USD oc get infrastructure cluster -o jsonpath='{.status.platform}' 2.10.1. Sample YAML for a compute machine set custom resource on RHV This sample YAML defines a compute machine set that runs on RHV and creates nodes that are labeled with node-role.kubernetes.io/<node_role>: "" . In this sample, <infrastructure_id> is the infrastructure ID label that is based on the cluster ID that you set when you provisioned the cluster, and <role> is the node label to add. 
apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 machine.openshift.io/cluster-api-machine-role: <role> 2 machine.openshift.io/cluster-api-machine-type: <role> 3 name: <infrastructure_id>-<role> 4 namespace: openshift-machine-api spec: replicas: <number_of_replicas> 5 Selector: 6 matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 7 machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> 8 template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 9 machine.openshift.io/cluster-api-machine-role: <role> 10 machine.openshift.io/cluster-api-machine-type: <role> 11 machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> 12 spec: metadata: labels: node-role.kubernetes.io/<role>: "" 13 providerSpec: value: apiVersion: ovirtproviderconfig.machine.openshift.io/v1beta1 cluster_id: <ovirt_cluster_id> 14 template_name: <ovirt_template_name> 15 sparse: <boolean_value> 16 format: <raw_or_cow> 17 cpu: 18 sockets: <number_of_sockets> 19 cores: <number_of_cores> 20 threads: <number_of_threads> 21 memory_mb: <memory_size> 22 guaranteed_memory_mb: <memory_size> 23 os_disk: 24 size_gb: <disk_size> 25 storage_domain_id: <storage_domain_UUID> 26 network_interfaces: 27 vnic_profile_id: <vnic_profile_id> 28 credentialsSecret: name: ovirt-credentials 29 kind: OvirtMachineProviderSpec type: <workload_type> 30 auto_pinning_policy: <auto_pinning_policy> 31 hugepages: <hugepages> 32 affinityGroupsNames: - compute 33 userDataSecret: name: worker-user-data 1 7 9 Specify the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. If you have the OpenShift CLI ( oc ) installed, you can obtain the infrastructure ID by running the following command: USD oc get -o jsonpath='{.status.infrastructureName}{"\n"}' infrastructure cluster 2 3 10 11 13 Specify the node label to add. 4 8 12 Specify the infrastructure ID and node label. These two strings together cannot be longer than 35 characters. 5 Specify the number of machines to create. 6 Selector for the machines. 14 Specify the UUID for the RHV cluster to which this VM instance belongs. 15 Specify the RHV VM template to use to create the machine. 16 Setting this option to false enables preallocation of disks. The default is true . Setting sparse to true with format set to raw is not available for block storage domains. The raw format writes the entire virtual disk to the underlying physical disk. 17 Can be set to cow or raw . The default is cow . The cow format is optimized for virtual machines. Note Preallocating disks on file storage domains writes zeroes to the file. This might not actually preallocate disks depending on the underlying storage. 18 Optional: The CPU field contains the CPU configuration, including sockets, cores, and threads. 19 Optional: Specify the number of sockets for a VM. 20 Optional: Specify the number of cores per socket. 21 Optional: Specify the number of threads per core. 22 Optional: Specify the size of a VM's memory in MiB. 23 Optional: Specify the size of a virtual machine's guaranteed memory in MiB. This is the amount of memory that is guaranteed not to be drained by the ballooning mechanism. For more information, see Memory Ballooning and Optimization Settings Explained . Note If you are using a version earlier than RHV 4.4.8, see Guaranteed memory requirements for OpenShift on Red Hat Virtualization clusters . 
24 Optional: Root disk of the node. 25 Optional: Specify the size of the bootable disk in GiB. 26 Optional: Specify the UUID of the storage domain for the compute node's disks. If none is provided, the compute node is created on the same storage domain as the control nodes. (default) 27 Optional: List of the network interfaces of the VM. If you include this parameter, OpenShift Container Platform discards all network interfaces from the template and creates new ones. 28 Optional: Specify the vNIC profile ID. 29 Specify the name of the secret object that holds the RHV credentials. 30 Optional: Specify the workload type for which the instance is optimized. This value affects the RHV VM parameter. Supported values: desktop , server (default), high_performance . high_performance improves performance on the VM. Limitations exist, for example, you cannot access the VM with a graphical console. For more information, see Configuring High Performance Virtual Machines, Templates, and Pools in the Virtual Machine Management Guide . 31 Optional: AutoPinningPolicy defines the policy that automatically sets CPU and NUMA settings, including pinning to the host for this instance. Supported values: none , resize_and_pin . For more information, see Setting NUMA Nodes in the Virtual Machine Management Guide . 32 Optional: Hugepages is the size in KiB for defining hugepages in a VM. Supported values: 2048 or 1048576 . For more information, see Configuring Huge Pages in the Virtual Machine Management Guide . 33 Optional: A list of affinity group names to be applied to the VMs. The affinity groups must exist in oVirt. Note Because RHV uses a template when creating a VM, if you do not specify a value for an optional parameter, RHV uses the value for that parameter that is specified in the template. 2.10.2. Creating a compute machine set In addition to the compute machine sets created by the installation program, you can create your own to dynamically manage the machine compute resources for specific workloads of your choice. Prerequisites Deploy an OpenShift Container Platform cluster. Install the OpenShift CLI ( oc ). Log in to oc as a user with cluster-admin permission. Procedure Create a new YAML file that contains the compute machine set custom resource (CR) sample and is named <file_name>.yaml . Ensure that you set the <clusterID> and <role> parameter values. Optional: If you are not sure which value to set for a specific field, you can check an existing compute machine set from your cluster. 
To list the compute machine sets in your cluster, run the following command: USD oc get machinesets -n openshift-machine-api Example output NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m To view values of a specific compute machine set custom resource (CR), run the following command: USD oc get machineset <machineset_name> \ -n openshift-machine-api -o yaml Example output apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 name: <infrastructure_id>-<role> 2 namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machine-role: <role> machine.openshift.io/cluster-api-machine-type: <role> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> spec: providerSpec: 3 ... 1 The cluster infrastructure ID. 2 A default node label. Note For clusters that have user-provisioned infrastructure, a compute machine set can only create worker and infra type machines. 3 The values in the <providerSpec> section of the compute machine set CR are platform-specific. For more information about <providerSpec> parameters in the CR, see the sample compute machine set CR configuration for your provider. Create a MachineSet CR by running the following command: USD oc create -f <file_name>.yaml Verification View the list of compute machine sets by running the following command: USD oc get machineset -n openshift-machine-api Example output NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-infra-us-east-1a 1 1 1 1 11m agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m When the new compute machine set is available, the DESIRED and CURRENT values match. If the compute machine set is not available, wait a few minutes and run the command again. 2.11. Creating a compute machine set on vSphere You can create a different compute machine set to serve a specific purpose in your OpenShift Container Platform cluster on VMware vSphere. For example, you might create infrastructure machine sets and related machines so that you can move supporting workloads to the new machines. Important You can use the advanced machine management and scaling capabilities only in clusters where the Machine API is operational. Clusters with user-provisioned infrastructure require additional validation and configuration to use the Machine API. Clusters with the infrastructure platform type none cannot use the Machine API. This limitation applies even if the compute machines that are attached to the cluster are installed on a platform that supports the feature. This parameter cannot be changed after installation. To view the platform type for your cluster, run the following command: USD oc get infrastructure cluster -o jsonpath='{.status.platform}' 2.11.1. 
Sample YAML for a compute machine set custom resource on vSphere This sample YAML defines a compute machine set that runs on VMware vSphere and creates nodes that are labeled with node-role.kubernetes.io/<role>: "" . In this sample, <infrastructure_id> is the infrastructure ID label that is based on the cluster ID that you set when you provisioned the cluster, and <role> is the node label to add. apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: creationTimestamp: null labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 name: <infrastructure_id>-<role> 2 namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 3 machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> 4 template: metadata: creationTimestamp: null labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 5 machine.openshift.io/cluster-api-machine-role: <role> 6 machine.openshift.io/cluster-api-machine-type: <role> 7 machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> 8 spec: metadata: creationTimestamp: null labels: node-role.kubernetes.io/<role>: "" 9 providerSpec: value: apiVersion: vsphereprovider.openshift.io/v1beta1 credentialsSecret: name: vsphere-cloud-credentials diskGiB: 120 kind: VSphereMachineProviderSpec memoryMiB: 8192 metadata: creationTimestamp: null network: devices: - networkName: "<vm_network_name>" 10 numCPUs: 4 numCoresPerSocket: 1 snapshot: "" template: <vm_template_name> 11 userDataSecret: name: worker-user-data workspace: datacenter: <vcenter_datacenter_name> 12 datastore: <vcenter_datastore_name> 13 folder: <vcenter_vm_folder_path> 14 resourcepool: <vsphere_resource_pool> 15 server: <vcenter_server_ip> 16 1 3 5 Specify the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. If you have the OpenShift CLI ( oc ) installed, you can obtain the infrastructure ID by running the following command: USD oc get -o jsonpath='{.status.infrastructureName}{"\n"}' infrastructure cluster 2 4 8 Specify the infrastructure ID and node label. 6 7 9 Specify the node label to add. 10 Specify the vSphere VM network to deploy the compute machine set to. This VM network must be where other compute machines reside in the cluster. 11 Specify the vSphere VM template to use, such as user-5ddjd-rhcos . 12 Specify the vCenter Datacenter to deploy the compute machine set on. 13 Specify the vCenter Datastore to deploy the compute machine set on. 14 Specify the path to the vSphere VM folder in vCenter, such as /dc1/vm/user-inst-5ddjd . 15 Specify the vSphere resource pool for your VMs. 16 Specify the vCenter server IP or fully qualified domain name. 2.11.2. Minimum required vCenter privileges for compute machine set management To manage compute machine sets in an OpenShift Container Platform cluster on vCenter, you must use an account with privileges to read, create, and delete the required resources. Using an account that has global administrative privileges is the simplest way to access all of the necessary permissions. If you cannot use an account with global administrative privileges, you must create roles to grant the minimum required privileges. The following table lists the minimum vCenter roles and privileges that are required to create, scale, and delete compute machine sets and to delete machines in your OpenShift Container Platform cluster. Example 2.1. 
Minimum vCenter roles and privileges required for compute machine set management vSphere object for role When required Required privileges vSphere vCenter Always InventoryService.Tagging.AttachTag InventoryService.Tagging.CreateCategory InventoryService.Tagging.CreateTag InventoryService.Tagging.DeleteCategory InventoryService.Tagging.DeleteTag InventoryService.Tagging.EditCategory InventoryService.Tagging.EditTag Sessions.ValidateSession StorageProfile.Update 1 StorageProfile.View 1 vSphere vCenter Cluster Always Resource.AssignVMToPool vSphere Datastore Always Datastore.AllocateSpace Datastore.Browse vSphere Port Group Always Network.Assign Virtual Machine Folder Always VirtualMachine.Config.AddRemoveDevice VirtualMachine.Config.AdvancedConfig VirtualMachine.Config.Annotation VirtualMachine.Config.CPUCount VirtualMachine.Config.DiskExtend VirtualMachine.Config.Memory VirtualMachine.Config.Settings VirtualMachine.Interact.PowerOff VirtualMachine.Interact.PowerOn VirtualMachine.Inventory.CreateFromExisting VirtualMachine.Inventory.Delete VirtualMachine.Provisioning.Clone vSphere vCenter Datacenter If the installation program creates the virtual machine folder Resource.AssignVMToPool VirtualMachine.Provisioning.DeployTemplate 1 The StorageProfile.Update and StorageProfile.View permissions are required only for storage backends that use the Container Storage Interface (CSI). The following table details the permissions and propagation settings that are required for compute machine set management. Example 2.2. Required permissions and propagation settings vSphere object Folder type Propagate to children Permissions required vSphere vCenter Always Not required Listed required privileges vSphere vCenter Datacenter Existing folder Not required ReadOnly permission Installation program creates the folder Required Listed required privileges vSphere vCenter Cluster Always Required Listed required privileges vSphere vCenter Datastore Always Not required Listed required privileges vSphere Switch Always Not required ReadOnly permission vSphere Port Group Always Not required Listed required privileges vSphere vCenter Virtual Machine Folder Existing folder Required Listed required privileges For more information about creating an account with only the required privileges, see vSphere Permissions and User Management Tasks in the vSphere documentation. 2.11.3. Requirements for clusters with user-provisioned infrastructure to use compute machine sets To use compute machine sets on clusters that have user-provisioned infrastructure, you must ensure that your cluster configuration supports using the Machine API. Obtaining the infrastructure ID To create compute machine sets, you must be able to supply the infrastructure ID for your cluster. Procedure To obtain the infrastructure ID for your cluster, run the following command: USD oc get infrastructure cluster -o jsonpath='{.status.infrastructureName}' Satisfying vSphere credentials requirements To use compute machine sets, the Machine API must be able to interact with vCenter. Credentials that authorize the Machine API components to interact with vCenter must exist in a secret in the openshift-machine-api namespace.
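The secret stores the vCenter user name and password under keys that are prefixed with the vCenter server address, as shown in the sample output in the procedure that follows. For reference, an equivalent declarative manifest might look like the following sketch; the server address and credential values are placeholders: apiVersion: v1 kind: Secret metadata: name: vsphere-cloud-credentials namespace: openshift-machine-api type: Opaque stringData: <vcenter-server>.username: <openshift-user> <vcenter-server>.password: <openshift-user-password> The procedure that follows shows how to check for the secret and create it with the oc CLI.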
Procedure To determine whether the required credentials exist, run the following command: USD oc get secret \ -n openshift-machine-api vsphere-cloud-credentials \ -o go-template='{{range USDk,USDv := .data}}{{printf "%s: " USDk}}{{if not USDv}}{{USDv}}{{else}}{{USDv | base64decode}}{{end}}{{"\n"}}{{end}}' Sample output <vcenter-server>.password=<openshift-user-password> <vcenter-server>.username=<openshift-user> where <vcenter-server> is the IP address or fully qualified domain name (FQDN) of the vCenter server and <openshift-user> and <openshift-user-password> are the OpenShift Container Platform administrator credentials to use. If the secret does not exist, create it by running the following command: USD oc create secret generic vsphere-cloud-credentials \ -n openshift-machine-api \ --from-literal=<vcenter-server>.username=<openshift-user> --from-literal=<vcenter-server>.password=<openshift-user-password> Satisfying Ignition configuration requirements Provisioning virtual machines (VMs) requires a valid Ignition configuration. The Ignition configuration contains the machine-config-server address and a system trust bundle for obtaining further Ignition configurations from the Machine Config Operator. By default, this configuration is stored in the worker-user-data secret in the machine-api-operator namespace. Compute machine sets reference the secret during the machine creation process. Procedure To determine whether the required secret exists, run the following command: USD oc get secret \ -n openshift-machine-api worker-user-data \ -o go-template='{{range USDk,USDv := .data}}{{printf "%s: " USDk}}{{if not USDv}}{{USDv}}{{else}}{{USDv | base64decode}}{{end}}{{"\n"}}{{end}}' Sample output disableTemplating: false userData: 1 { "ignition": { ... }, ... } 1 The full output is omitted here, but should have this format. If the secret does not exist, create it by running the following command: USD oc create secret generic worker-user-data \ -n openshift-machine-api \ --from-file=<installation_directory>/worker.ign where <installation_directory> is the directory that was used to store your installation assets during cluster installation. Additional resources Understanding the Machine Config Operator Installing RHCOS and starting the OpenShift Container Platform bootstrap process 2.11.4. Creating a compute machine set In addition to the compute machine sets created by the installation program, you can create your own to dynamically manage the machine compute resources for specific workloads of your choice. Note Clusters that are installed with user-provisioned infrastructure have a different networking stack than clusters with infrastructure that is provisioned by the installation program. As a result of this difference, automatic load balancer management is unsupported on clusters that have user-provisioned infrastructure. For these clusters, a compute machine set can only create worker and infra type machines. Prerequisites Deploy an OpenShift Container Platform cluster. Install the OpenShift CLI ( oc ). Log in to oc as a user with cluster-admin permission. Have the necessary permissions to deploy VMs in your vCenter instance and have the required access to the datastore specified. If your cluster uses user-provisioned infrastructure, you have satisfied the specific Machine API requirements for that configuration. Procedure Create a new YAML file that contains the compute machine set custom resource (CR) sample and is named <file_name>.yaml . 
Ensure that you set the <clusterID> and <role> parameter values. Optional: If you are not sure which value to set for a specific field, you can check an existing compute machine set from your cluster. To list the compute machine sets in your cluster, run the following command: USD oc get machinesets -n openshift-machine-api Example output NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m To view values of a specific compute machine set custom resource (CR), run the following command: USD oc get machineset <machineset_name> \ -n openshift-machine-api -o yaml Example output apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 name: <infrastructure_id>-<role> 2 namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machine-role: <role> machine.openshift.io/cluster-api-machine-type: <role> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> spec: providerSpec: 3 ... 1 The cluster infrastructure ID. 2 A default node label. Note For clusters that have user-provisioned infrastructure, a compute machine set can only create worker and infra type machines. 3 The values in the <providerSpec> section of the compute machine set CR are platform-specific. For more information about <providerSpec> parameters in the CR, see the sample compute machine set CR configuration for your provider. If you are creating a compute machine set for a cluster that has user-provisioned infrastructure, note the following important values: Example vSphere providerSpec values apiVersion: machine.openshift.io/v1beta1 kind: MachineSet ... template: ... spec: providerSpec: value: apiVersion: machine.openshift.io/v1beta1 credentialsSecret: name: vsphere-cloud-credentials 1 diskGiB: 120 kind: VSphereMachineProviderSpec memoryMiB: 16384 network: devices: - networkName: "<vm_network_name>" numCPUs: 4 numCoresPerSocket: 4 snapshot: "" template: <vm_template_name> 2 userDataSecret: name: worker-user-data 3 workspace: datacenter: <vcenter_datacenter_name> datastore: <vcenter_datastore_name> folder: <vcenter_vm_folder_path> resourcepool: <vsphere_resource_pool> server: <vcenter_server_address> 4 1 The name of the secret in the openshift-machine-api namespace that contains the required vCenter credentials. 2 The name of the RHCOS VM template for your cluster that was created during installation. 3 The name of the secret in the openshift-machine-api namespace that contains the required Ignition configuration credentials. 4 The IP address or fully qualified domain name (FQDN) of the vCenter server. 
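If you are not sure which template or vCenter server values to use, one way to read them from an existing compute machine set is to query the providerSpec fields that are shown in the example above. The machine set name in the following commands is a placeholder: $ oc -n openshift-machine-api get machineset <machineset_name> -o jsonpath='{.spec.template.spec.providerSpec.value.template}{"\n"}' $ oc -n openshift-machine-api get machineset <machineset_name> -o jsonpath='{.spec.template.spec.providerSpec.value.workspace.server}{"\n"}' Copy the returned values into the template and workspace.server fields of your new compute machine set.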
Create a MachineSet CR by running the following command: USD oc create -f <file_name>.yaml Verification View the list of compute machine sets by running the following command: USD oc get machineset -n openshift-machine-api Example output NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-infra-us-east-1a 1 1 1 1 11m agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m When the new compute machine set is available, the DESIRED and CURRENT values match. If the compute machine set is not available, wait a few minutes and run the command again. 2.11.5. Labeling GPU machine sets for the cluster autoscaler You can use a machine set label to indicate which machines the cluster autoscaler can use to deploy GPU-enabled nodes. Prerequisites Your cluster uses a cluster autoscaler. Procedure On the machine set that you want to create machines for the cluster autoscaler to use to deploy GPU-enabled nodes, add a cluster-api/accelerator label: apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: name: machine-set-name spec: template: spec: metadata: labels: cluster-api/accelerator: nvidia-t4 1 1 Specify a label of your choice that consists of alphanumeric characters, - , _ , or . and starts and ends with an alphanumeric character. For example, you might use nvidia-t4 to represent Nvidia T4 GPUs, or nvidia-a10g for A10G GPUs. Note You must specify the value of this label for the spec.resourceLimits.gpus.type parameter in your ClusterAutoscaler CR. For more information, see "Cluster autoscaler resource definition". Additional resources Cluster autoscaler resource definition 2.12. Creating a compute machine set on bare metal You can create a different compute machine set to serve a specific purpose in your OpenShift Container Platform cluster on bare metal. For example, you might create infrastructure machine sets and related machines so that you can move supporting workloads to the new machines. Important You can use the advanced machine management and scaling capabilities only in clusters where the Machine API is operational. Clusters with user-provisioned infrastructure require additional validation and configuration to use the Machine API. Clusters with the infrastructure platform type none cannot use the Machine API. This limitation applies even if the compute machines that are attached to the cluster are installed on a platform that supports the feature. This parameter cannot be changed after installation. To view the platform type for your cluster, run the following command: USD oc get infrastructure cluster -o jsonpath='{.status.platform}' 2.12.1. Sample YAML for a compute machine set custom resource on bare metal This sample YAML defines a compute machine set that runs on bare metal and creates nodes that are labeled with node-role.kubernetes.io/<role>: "" . In this sample, <infrastructure_id> is the infrastructure ID label that is based on the cluster ID that you set when you provisioned the cluster, and <role> is the node label to add. 
apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: creationTimestamp: null labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 name: <infrastructure_id>-<role> 2 namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 3 machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> 4 template: metadata: creationTimestamp: null labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 5 machine.openshift.io/cluster-api-machine-role: <role> 6 machine.openshift.io/cluster-api-machine-type: <role> 7 machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> 8 spec: metadata: creationTimestamp: null labels: node-role.kubernetes.io/<role>: "" 9 providerSpec: value: apiVersion: baremetal.cluster.k8s.io/v1alpha1 hostSelector: {} image: checksum: http:/172.22.0.3:6181/images/rhcos-<version>.<architecture>.qcow2.<md5sum> 10 url: http://172.22.0.3:6181/images/rhcos-<version>.<architecture>.qcow2 11 kind: BareMetalMachineProviderSpec metadata: creationTimestamp: null userData: name: worker-user-data 1 3 5 Specify the infrastructure ID that is based on the cluster ID that you set when you provisioned the cluster. If you have the OpenShift CLI ( oc ) installed, you can obtain the infrastructure ID by running the following command: USD oc get -o jsonpath='{.status.infrastructureName}{"\n"}' infrastructure cluster 2 4 8 Specify the infrastructure ID and node label. 6 7 9 Specify the node label to add. 10 Edit the checksum URL to use the API VIP address. 11 Edit the url URL to use the API VIP address. 2.12.2. Creating a compute machine set In addition to the compute machine sets created by the installation program, you can create your own to dynamically manage the machine compute resources for specific workloads of your choice. Prerequisites Deploy an OpenShift Container Platform cluster. Install the OpenShift CLI ( oc ). Log in to oc as a user with cluster-admin permission. Procedure Create a new YAML file that contains the compute machine set custom resource (CR) sample and is named <file_name>.yaml . Ensure that you set the <clusterID> and <role> parameter values. Optional: If you are not sure which value to set for a specific field, you can check an existing compute machine set from your cluster. 
To list the compute machine sets in your cluster, run the following command: USD oc get machinesets -n openshift-machine-api Example output NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m To view values of a specific compute machine set custom resource (CR), run the following command: USD oc get machineset <machineset_name> \ -n openshift-machine-api -o yaml Example output apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 name: <infrastructure_id>-<role> 2 namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machine-role: <role> machine.openshift.io/cluster-api-machine-type: <role> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> spec: providerSpec: 3 ... 1 The cluster infrastructure ID. 2 A default node label. Note For clusters that have user-provisioned infrastructure, a compute machine set can only create worker and infra type machines. 3 The values in the <providerSpec> section of the compute machine set CR are platform-specific. For more information about <providerSpec> parameters in the CR, see the sample compute machine set CR configuration for your provider. Create a MachineSet CR by running the following command: USD oc create -f <file_name>.yaml Verification View the list of compute machine sets by running the following command: USD oc get machineset -n openshift-machine-api Example output NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-infra-us-east-1a 1 1 1 1 11m agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m When the new compute machine set is available, the DESIRED and CURRENT values match. If the compute machine set is not available, wait a few minutes and run the command again. 2.12.3. Labeling GPU machine sets for the cluster autoscaler You can use a machine set label to indicate which machines the cluster autoscaler can use to deploy GPU-enabled nodes. Prerequisites Your cluster uses a cluster autoscaler. Procedure On the machine set that you want to create machines for the cluster autoscaler to use to deploy GPU-enabled nodes, add a cluster-api/accelerator label: apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: name: machine-set-name spec: template: spec: metadata: labels: cluster-api/accelerator: nvidia-t4 1 1 Specify a label of your choice that consists of alphanumeric characters, - , _ , or . and starts and ends with an alphanumeric character. For example, you might use nvidia-t4 to represent Nvidia T4 GPUs, or nvidia-a10g for A10G GPUs. Note You must specify the value of this label for the spec.resourceLimits.gpus.type parameter in your ClusterAutoscaler CR. For more information, see "Cluster autoscaler resource definition". Additional resources Cluster autoscaler resource definition
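For reference, the cluster autoscaler consumes this label value through the gpus resource limit. The following is a minimal sketch of that portion of a ClusterAutoscaler CR, assuming the nvidia-t4 label value from the example above and illustrative minimum and maximum counts; see "Cluster autoscaler resource definition" for the complete set of fields: apiVersion: autoscaling.openshift.io/v1 kind: ClusterAutoscaler metadata: name: default spec: resourceLimits: gpus: - type: nvidia-t4 min: 0 max: 4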
[ "oc get infrastructure cluster -o jsonpath='{.status.platform}'", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 machine.openshift.io/cluster-api-machine-role: <role> 2 machine.openshift.io/cluster-api-machine-type: <role> 3 name: <infrastructure_id>-<role>-<zone> 4 namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 5 machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<zone> 6 template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 7 machine.openshift.io/cluster-api-machine-role: <role> 8 machine.openshift.io/cluster-api-machine-type: <role> 9 machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<zone> 10 spec: metadata: labels: node-role.kubernetes.io/<role>: \"\" providerSpec: value: apiVersion: machine.openshift.io/v1 credentialsSecret: name: alibabacloud-credentials imageId: <image_id> 11 instanceType: <instance_type> 12 kind: AlibabaCloudMachineProviderConfig ramRoleName: <infrastructure_id>-role-worker 13 regionId: <region> 14 resourceGroup: 15 id: <resource_group_id> type: ID securityGroups: - tags: 16 - Key: Name Value: <infrastructure_id>-sg-<role> type: Tags systemDisk: 17 category: cloud_essd size: <disk_size> tag: 18 - Key: kubernetes.io/cluster/<infrastructure_id> Value: owned userDataSecret: name: <user_data_secret> 19 vSwitch: tags: 20 - Key: Name Value: <infrastructure_id>-vswitch-<zone> type: Tags vpcId: \"\" zoneId: <zone> 21", "oc get -o jsonpath='{.status.infrastructureName}{\"\\n\"}' infrastructure cluster", "spec: template: spec: providerSpec: value: securityGroups: - tags: - Key: kubernetes.io/cluster/<infrastructure_id> 1 Value: owned - Key: GISV Value: ocp - Key: sigs.k8s.io/cloud-provider-alibaba/origin 2 Value: ocp - Key: Name Value: <infrastructure_id>-sg-<role> 3 type: Tags", "spec: template: spec: providerSpec: value: tag: - Key: kubernetes.io/cluster/<infrastructure_id> 1 Value: owned - Key: GISV 2 Value: ocp - Key: sigs.k8s.io/cloud-provider-alibaba/origin 3 Value: ocp", "spec: template: spec: providerSpec: value: vSwitch: tags: - Key: kubernetes.io/cluster/<infrastructure_id> 1 Value: owned - Key: GISV 2 Value: ocp - Key: sigs.k8s.io/cloud-provider-alibaba/origin 3 Value: ocp - Key: Name Value: <infrastructure_id>-vswitch-<zone> 4 type: Tags", "oc get machinesets -n openshift-machine-api", "NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m", "oc get machineset <machineset_name> -n openshift-machine-api -o yaml", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 name: <infrastructure_id>-<role> 2 namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machine-role: <role> machine.openshift.io/cluster-api-machine-type: <role> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> 
spec: providerSpec: 3", "oc create -f <file_name>.yaml", "oc get machineset -n openshift-machine-api", "NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-infra-us-east-1a 1 1 1 1 11m agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m", "oc get infrastructure cluster -o jsonpath='{.status.platform}'", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 name: <infrastructure_id>-<role>-<zone> 2 namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 3 machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<zone> 4 template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 5 machine.openshift.io/cluster-api-machine-role: <role> 6 machine.openshift.io/cluster-api-machine-type: <role> 7 machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<zone> 8 spec: metadata: labels: node-role.kubernetes.io/<role>: \"\" 9 providerSpec: value: ami: id: ami-046fe691f52a953f9 10 apiVersion: awsproviderconfig.openshift.io/v1beta1 blockDevices: - ebs: iops: 0 volumeSize: 120 volumeType: gp2 credentialsSecret: name: aws-cloud-credentials deviceIndex: 0 iamInstanceProfile: id: <infrastructure_id>-worker-profile 11 instanceType: m6i.large kind: AWSMachineProviderConfig placement: availabilityZone: <zone> 12 region: <region> 13 securityGroups: - filters: - name: tag:Name values: - <infrastructure_id>-worker-sg 14 subnet: filters: - name: tag:Name values: - <infrastructure_id>-private-<zone> 15 tags: - name: kubernetes.io/cluster/<infrastructure_id> 16 value: owned - name: <custom_tag_name> 17 value: <custom_tag_value> 18 userDataSecret: name: worker-user-data", "oc get -o jsonpath='{.status.infrastructureName}{\"\\n\"}' infrastructure cluster", "oc -n openshift-machine-api -o jsonpath='{.spec.template.spec.providerSpec.value.ami.id}{\"\\n\"}' get machineset/<infrastructure_id>-<role>-<zone>", "oc get machinesets -n openshift-machine-api", "NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m", "oc get machineset <machineset_name> -n openshift-machine-api -o yaml", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 name: <infrastructure_id>-<role> 2 namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machine-role: <role> machine.openshift.io/cluster-api-machine-type: <role> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> spec: providerSpec: 3", "oc create -f <file_name>.yaml", "oc get machineset -n openshift-machine-api", "NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-infra-us-east-1a 1 1 1 1 11m 
agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: name: machine-set-name spec: template: spec: metadata: labels: cluster-api/accelerator: nvidia-t4 1", "providerSpec: value: metadataServiceOptions: authentication: Required 1", "providerSpec: placement: tenancy: dedicated", "providerSpec: value: spotMarketOptions: {}", "oc get nodes", "NAME STATUS ROLES AGE VERSION ip-10-0-52-50.us-east-2.compute.internal Ready worker 3d17h v1.26.0 ip-10-0-58-24.us-east-2.compute.internal Ready control-plane,master 3d17h v1.26.0 ip-10-0-68-148.us-east-2.compute.internal Ready worker 3d17h v1.26.0 ip-10-0-68-68.us-east-2.compute.internal Ready control-plane,master 3d17h v1.26.0 ip-10-0-72-170.us-east-2.compute.internal Ready control-plane,master 3d17h v1.26.0 ip-10-0-74-50.us-east-2.compute.internal Ready worker 3d17h v1.26.0", "oc get machinesets -n openshift-machine-api", "NAME DESIRED CURRENT READY AVAILABLE AGE preserve-dsoc12r4-ktjfc-worker-us-east-2a 1 1 1 1 3d11h preserve-dsoc12r4-ktjfc-worker-us-east-2b 2 2 2 2 3d11h", "oc get machines -n openshift-machine-api | grep worker", "preserve-dsoc12r4-ktjfc-worker-us-east-2a-dts8r Running m5.xlarge us-east-2 us-east-2a 3d11h preserve-dsoc12r4-ktjfc-worker-us-east-2b-dkv7w Running m5.xlarge us-east-2 us-east-2b 3d11h preserve-dsoc12r4-ktjfc-worker-us-east-2b-k58cw Running m5.xlarge us-east-2 us-east-2b 3d11h", "oc get machineset preserve-dsoc12r4-ktjfc-worker-us-east-2a -n openshift-machine-api -o json > <output_file.json>", "jq .spec.template.spec.providerSpec.value.instanceType preserve-dsoc12r4-ktjfc-worker-gpu-us-east-2a.json \"g4dn.xlarge\"", "oc -n openshift-machine-api get preserve-dsoc12r4-ktjfc-worker-us-east-2a -o json | diff preserve-dsoc12r4-ktjfc-worker-gpu-us-east-2a.json -", "10c10 < \"name\": \"preserve-dsoc12r4-ktjfc-worker-gpu-us-east-2a\", --- > \"name\": \"preserve-dsoc12r4-ktjfc-worker-us-east-2a\", 21c21 < \"machine.openshift.io/cluster-api-machineset\": \"preserve-dsoc12r4-ktjfc-worker-gpu-us-east-2a\" --- > \"machine.openshift.io/cluster-api-machineset\": \"preserve-dsoc12r4-ktjfc-worker-us-east-2a\" 31c31 < \"machine.openshift.io/cluster-api-machineset\": \"preserve-dsoc12r4-ktjfc-worker-gpu-us-east-2a\" --- > \"machine.openshift.io/cluster-api-machineset\": \"preserve-dsoc12r4-ktjfc-worker-us-east-2a\" 60c60 < \"instanceType\": \"g4dn.xlarge\", --- > \"instanceType\": \"m5.xlarge\",", "oc create -f preserve-dsoc12r4-ktjfc-worker-gpu-us-east-2a.json", "machineset.machine.openshift.io/preserve-dsoc12r4-ktjfc-worker-gpu-us-east-2a created", "oc -n openshift-machine-api get machinesets | grep gpu", "preserve-dsoc12r4-ktjfc-worker-gpu-us-east-2a 1 1 1 1 4m21s", "oc -n openshift-machine-api get machines | grep gpu", "preserve-dsoc12r4-ktjfc-worker-gpu-us-east-2a running g4dn.xlarge us-east-2 us-east-2a 4m36s", "oc get pods -n openshift-nfd", "NAME READY STATUS RESTARTS AGE nfd-controller-manager-8646fcbb65-x5qgk 2/2 Running 7 (8h ago) 1d", "oc get pods -n openshift-nfd", "NAME READY STATUS RESTARTS AGE nfd-controller-manager-8646fcbb65-x5qgk 2/2 Running 7 (8h ago) 12d nfd-master-769656c4cb-w9vrv 1/1 Running 0 12d nfd-worker-qjxb2 1/1 Running 3 (3d14h ago) 12d nfd-worker-xtz9b 1/1 Running 5 (3d14h ago) 12d", "oc describe node 
ip-10-0-132-138.us-east-2.compute.internal | egrep 'Roles|pci'", "Roles: worker feature.node.kubernetes.io/pci-1013.present=true feature.node.kubernetes.io/pci-10de.present=true feature.node.kubernetes.io/pci-1d0f.present=true", "oc get infrastructure cluster -o jsonpath='{.status.platform}'", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 machine.openshift.io/cluster-api-machine-role: <role> 2 machine.openshift.io/cluster-api-machine-type: <role> name: <infrastructure_id>-<role>-<region> 3 namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<region> template: metadata: creationTimestamp: null labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machine-role: <role> machine.openshift.io/cluster-api-machine-type: <role> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<region> spec: metadata: creationTimestamp: null labels: machine.openshift.io/cluster-api-machineset: <machineset_name> node-role.kubernetes.io/<role>: \"\" providerSpec: value: apiVersion: azureproviderconfig.openshift.io/v1beta1 credentialsSecret: name: azure-cloud-credentials namespace: openshift-machine-api image: 4 offer: \"\" publisher: \"\" resourceID: /resourceGroups/<infrastructure_id>-rg/providers/Microsoft.Compute/galleries/gallery_<infrastructure_id>/images/<infrastructure_id>-gen2/versions/latest 5 sku: \"\" version: \"\" internalLoadBalancer: \"\" kind: AzureMachineProviderSpec location: <region> 6 managedIdentity: <infrastructure_id>-identity metadata: creationTimestamp: null natRule: null networkResourceGroup: \"\" osDisk: diskSizeGB: 128 managedDisk: storageAccountType: Premium_LRS osType: Linux publicIP: false publicLoadBalancer: \"\" resourceGroup: <infrastructure_id>-rg sshPrivateKey: \"\" sshPublicKey: \"\" tags: - name: <custom_tag_name> 7 value: <custom_tag_value> subnet: <infrastructure_id>-<role>-subnet userDataSecret: name: worker-user-data vmSize: Standard_D4s_v3 vnet: <infrastructure_id>-vnet zone: \"1\" 8", "oc get -o jsonpath='{.status.infrastructureName}{\"\\n\"}' infrastructure cluster", "oc -n openshift-machine-api -o jsonpath='{.spec.template.spec.providerSpec.value.subnet}{\"\\n\"}' get machineset/<infrastructure_id>-worker-centralus1", "oc -n openshift-machine-api -o jsonpath='{.spec.template.spec.providerSpec.value.vnet}{\"\\n\"}' get machineset/<infrastructure_id>-worker-centralus1", "oc get machinesets -n openshift-machine-api", "NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m", "oc get machineset <machineset_name> -n openshift-machine-api -o yaml", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 name: <infrastructure_id>-<role> 2 namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 
machine.openshift.io/cluster-api-machine-role: <role> machine.openshift.io/cluster-api-machine-type: <role> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> spec: providerSpec: 3", "oc create -f <file_name>.yaml", "oc get machineset -n openshift-machine-api", "NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-infra-us-east-1a 1 1 1 1 11m agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: name: machine-set-name spec: template: spec: metadata: labels: cluster-api/accelerator: nvidia-t4 1", "az vm image list --all --offer rh-ocp-worker --publisher redhat -o table", "Offer Publisher Sku Urn Version ------------- -------------- ------------------ -------------------------------------------------------------- -------------- rh-ocp-worker RedHat rh-ocp-worker RedHat:rh-ocp-worker:rh-ocpworker:4.8.2021122100 4.8.2021122100 rh-ocp-worker RedHat rh-ocp-worker-gen1 RedHat:rh-ocp-worker:rh-ocp-worker-gen1:4.8.2021122100 4.8.2021122100", "az vm image list --all --offer rh-ocp-worker --publisher redhat-limited -o table", "Offer Publisher Sku Urn Version ------------- -------------- ------------------ -------------------------------------------------------------- -------------- rh-ocp-worker redhat-limited rh-ocp-worker redhat-limited:rh-ocp-worker:rh-ocp-worker:4.8.2021122100 4.8.2021122100 rh-ocp-worker redhat-limited rh-ocp-worker-gen1 redhat-limited:rh-ocp-worker:rh-ocp-worker-gen1:4.8.2021122100 4.8.2021122100", "az vm image show --urn redhat:rh-ocp-worker:rh-ocp-worker:<version>", "az vm image show --urn redhat-limited:rh-ocp-worker:rh-ocp-worker:<version>", "az vm image terms show --urn redhat:rh-ocp-worker:rh-ocp-worker:<version>", "az vm image terms show --urn redhat-limited:rh-ocp-worker:rh-ocp-worker:<version>", "az vm image terms accept --urn redhat:rh-ocp-worker:rh-ocp-worker:<version>", "az vm image terms accept --urn redhat-limited:rh-ocp-worker:rh-ocp-worker:<version>", "providerSpec: value: image: offer: rh-ocp-worker publisher: redhat resourceID: \"\" sku: rh-ocp-worker type: MarketplaceWithPlan version: 4.8.2021122100", "providerSpec: diagnostics: boot: storageAccountType: AzureManaged 1", "providerSpec: diagnostics: boot: storageAccountType: CustomerManaged 1 customerManaged: storageAccountURI: https://<storage-account>.blob.core.windows.net 2", "providerSpec: value: spotVMOptions: {}", "oc edit machineset <machine-set-name>", "providerSpec: value: osDisk: diskSettings: 1 ephemeralStorageLocation: Local 2 cachingType: ReadOnly 3 managedDisk: storageAccountType: Standard_LRS 4", "oc create -f <machine-set-config>.yaml", "oc -n openshift-machine-api get secret <role>-user-data \\ 1 --template='{{index .data.userData | base64decode}}' | jq > userData.txt 2", "\"storage\": { \"disks\": [ 1 { \"device\": \"/dev/disk/azure/scsi1/lun0\", 2 \"partitions\": [ 3 { \"label\": \"lun0p1\", 4 \"sizeMiB\": 1024, 5 \"startMiB\": 0 } ] } ], \"filesystems\": [ 6 { \"device\": \"/dev/disk/by-partlabel/lun0p1\", \"format\": \"xfs\", \"path\": \"/var/lib/lun0p1\" } ] }, \"systemd\": { \"units\": [ 7 { \"contents\": \"[Unit]\\nBefore=local-fs.target\\n[Mount]\\nWhere=/var/lib/lun0p1\\nWhat=/dev/disk/by-partlabel/lun0p1\\nOptions=defaults,pquota\\n[Install]\\nWantedBy=local-fs.target\\n\", 8 
\"enabled\": true, \"name\": \"var-lib-lun0p1.mount\" } ] }", "oc -n openshift-machine-api get secret <role>-user-data \\ 1 --template='{{index .data.disableTemplating | base64decode}}' | jq > disableTemplating.txt", "oc -n openshift-machine-api create secret generic <role>-user-data-x5 \\ 1 --from-file=userData=userData.txt --from-file=disableTemplating=disableTemplating.txt", "oc edit machineset <machine-set-name>", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet spec: template: spec: metadata: labels: disk: ultrassd 1 providerSpec: value: ultraSSDCapability: Enabled 2 dataDisks: 3 - nameSuffix: ultrassd lun: 0 diskSizeGB: 4 deletionPolicy: Delete cachingType: None managedDisk: storageAccountType: UltraSSD_LRS userDataSecret: name: <role>-user-data-x5 4", "oc create -f <machine-set-name>.yaml", "oc get machines", "oc debug node/<node-name> -- chroot /host lsblk", "apiVersion: v1 kind: Pod metadata: name: ssd-benchmark1 spec: containers: - name: ssd-benchmark1 image: nginx ports: - containerPort: 80 name: \"http-server\" volumeMounts: - name: lun0p1 mountPath: \"/tmp\" volumes: - name: lun0p1 hostPath: path: /var/lib/lun0p1 type: DirectoryOrCreate nodeSelector: disktype: ultrassd", "StorageAccountType UltraSSD_LRS can be used only when additionalCapabilities.ultraSSDEnabled is set.", "failed to create vm <machine_name>: failure sending request for machine <machine_name>: cannot create vm: compute.VirtualMachinesClient#CreateOrUpdate: Failure sending request: StatusCode=400 -- Original Error: Code=\"BadRequest\" Message=\"Storage Account type 'UltraSSD_LRS' is not supported <more_information_about_why>.\"", "providerSpec: value: osDisk: diskSizeGB: 128 managedDisk: diskEncryptionSet: id: /subscriptions/<subscription_id>/resourceGroups/<resource_group_name>/providers/Microsoft.Compute/diskEncryptionSets/<disk_encryption_set_name> storageAccountType: Premium_LRS", "oc get machineset -n openshift-machine-api", "NAME DESIRED CURRENT READY AVAILABLE AGE myclustername-worker-centralus1 1 1 1 1 6h9m myclustername-worker-centralus2 1 1 1 1 6h9m myclustername-worker-centralus3 1 1 1 1 6h9m", "oc get machineset -n openshift-machine-api myclustername-worker-centralus1 -o yaml > machineset-azure.yaml", "cat machineset-azure.yaml", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: annotations: machine.openshift.io/GPU: \"0\" machine.openshift.io/memoryMb: \"16384\" machine.openshift.io/vCPU: \"4\" creationTimestamp: \"2023-02-06T14:08:19Z\" generation: 1 labels: machine.openshift.io/cluster-api-cluster: myclustername machine.openshift.io/cluster-api-machine-role: worker machine.openshift.io/cluster-api-machine-type: worker name: myclustername-worker-centralus1 namespace: openshift-machine-api resourceVersion: \"23601\" uid: acd56e0c-7612-473a-ae37-8704f34b80de spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: myclustername machine.openshift.io/cluster-api-machineset: myclustername-worker-centralus1 template: metadata: labels: machine.openshift.io/cluster-api-cluster: myclustername machine.openshift.io/cluster-api-machine-role: worker machine.openshift.io/cluster-api-machine-type: worker machine.openshift.io/cluster-api-machineset: myclustername-worker-centralus1 spec: lifecycleHooks: {} metadata: {} providerSpec: value: acceleratedNetworking: true apiVersion: machine.openshift.io/v1beta1 credentialsSecret: name: azure-cloud-credentials namespace: openshift-machine-api diagnostics: {} image: offer: \"\" publisher: \"\" resourceID: 
/resourceGroups/myclustername-rg/providers/Microsoft.Compute/galleries/gallery_myclustername_n6n4r/images/myclustername-gen2/versions/latest sku: \"\" version: \"\" kind: AzureMachineProviderSpec location: centralus managedIdentity: myclustername-identity metadata: creationTimestamp: null networkResourceGroup: myclustername-rg osDisk: diskSettings: {} diskSizeGB: 128 managedDisk: storageAccountType: Premium_LRS osType: Linux publicIP: false publicLoadBalancer: myclustername resourceGroup: myclustername-rg spotVMOptions: {} subnet: myclustername-worker-subnet userDataSecret: name: worker-user-data vmSize: Standard_D4s_v3 vnet: myclustername-vnet zone: \"1\" status: availableReplicas: 1 fullyLabeledReplicas: 1 observedGeneration: 1 readyReplicas: 1 replicas: 1", "cp machineset-azure.yaml machineset-azure-gpu.yaml", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: annotations: machine.openshift.io/GPU: \"1\" machine.openshift.io/memoryMb: \"28672\" machine.openshift.io/vCPU: \"4\" creationTimestamp: \"2023-02-06T20:27:12Z\" generation: 1 labels: machine.openshift.io/cluster-api-cluster: myclustername machine.openshift.io/cluster-api-machine-role: worker machine.openshift.io/cluster-api-machine-type: worker name: myclustername-nc4ast4-gpu-worker-centralus1 namespace: openshift-machine-api resourceVersion: \"166285\" uid: 4eedce7f-6a57-4abe-b529-031140f02ffa spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: myclustername machine.openshift.io/cluster-api-machineset: myclustername-nc4ast4-gpu-worker-centralus1 template: metadata: labels: machine.openshift.io/cluster-api-cluster: myclustername machine.openshift.io/cluster-api-machine-role: worker machine.openshift.io/cluster-api-machine-type: worker machine.openshift.io/cluster-api-machineset: myclustername-nc4ast4-gpu-worker-centralus1 spec: lifecycleHooks: {} metadata: {} providerSpec: value: acceleratedNetworking: true apiVersion: machine.openshift.io/v1beta1 credentialsSecret: name: azure-cloud-credentials namespace: openshift-machine-api diagnostics: {} image: offer: \"\" publisher: \"\" resourceID: /resourceGroups/myclustername-rg/providers/Microsoft.Compute/galleries/gallery_myclustername_n6n4r/images/myclustername-gen2/versions/latest sku: \"\" version: \"\" kind: AzureMachineProviderSpec location: centralus managedIdentity: myclustername-identity metadata: creationTimestamp: null networkResourceGroup: myclustername-rg osDisk: diskSettings: {} diskSizeGB: 128 managedDisk: storageAccountType: Premium_LRS osType: Linux publicIP: false publicLoadBalancer: myclustername resourceGroup: myclustername-rg spotVMOptions: {} subnet: myclustername-worker-subnet userDataSecret: name: worker-user-data vmSize: Standard_NC4as_T4_v3 vnet: myclustername-vnet zone: \"1\" status: availableReplicas: 1 fullyLabeledReplicas: 1 observedGeneration: 1 readyReplicas: 1 replicas: 1", "diff machineset-azure.yaml machineset-azure-gpu.yaml", "14c14 < name: myclustername-worker-centralus1 --- > name: myclustername-nc4ast4-gpu-worker-centralus1 23c23 < machine.openshift.io/cluster-api-machineset: myclustername-worker-centralus1 --- > machine.openshift.io/cluster-api-machineset: myclustername-nc4ast4-gpu-worker-centralus1 30c30 < machine.openshift.io/cluster-api-machineset: myclustername-worker-centralus1 --- > machine.openshift.io/cluster-api-machineset: myclustername-nc4ast4-gpu-worker-centralus1 67c67 < vmSize: Standard_D4s_v3 --- > vmSize: Standard_NC4as_T4_v3", "oc create -f machineset-azure-gpu.yaml", 
"machineset.machine.openshift.io/myclustername-nc4ast4-gpu-worker-centralus1 created", "oc get machineset -n openshift-machine-api", "NAME DESIRED CURRENT READY AVAILABLE AGE clustername-n6n4r-nc4ast4-gpu-worker-centralus1 1 1 1 1 122m clustername-n6n4r-worker-centralus1 1 1 1 1 8h clustername-n6n4r-worker-centralus2 1 1 1 1 8h clustername-n6n4r-worker-centralus3 1 1 1 1 8h", "oc get machines -n openshift-machine-api", "NAME PHASE TYPE REGION ZONE AGE myclustername-master-0 Running Standard_D8s_v3 centralus 2 6h40m myclustername-master-1 Running Standard_D8s_v3 centralus 1 6h40m myclustername-master-2 Running Standard_D8s_v3 centralus 3 6h40m myclustername-nc4ast4-gpu-worker-centralus1-w9bqn Running centralus 1 21m myclustername-worker-centralus1-rbh6b Running Standard_D4s_v3 centralus 1 6h38m myclustername-worker-centralus2-dbz7w Running Standard_D4s_v3 centralus 2 6h38m myclustername-worker-centralus3-p9b8c Running Standard_D4s_v3 centralus 3 6h38m", "oc get nodes", "NAME STATUS ROLES AGE VERSION myclustername-master-0 Ready control-plane,master 6h39m v1.26.0 myclustername-master-1 Ready control-plane,master 6h41m v1.26.0 myclustername-master-2 Ready control-plane,master 6h39m v1.26.0 myclustername-nc4ast4-gpu-worker-centralus1-w9bqn Ready worker 14m v1.26.0 myclustername-worker-centralus1-rbh6b Ready worker 6h29m v1.26.0 myclustername-worker-centralus2-dbz7w Ready worker 6h29m v1.26.0 myclustername-worker-centralus3-p9b8c Ready worker 6h31m v1.26.0", "oc get machineset -n openshift-machine-api", "NAME DESIRED CURRENT READY AVAILABLE AGE myclustername-worker-centralus1 1 1 1 1 8h myclustername-worker-centralus2 1 1 1 1 8h myclustername-worker-centralus3 1 1 1 1 8h", "oc create -f machineset-azure-gpu.yaml", "get machineset -n openshift-machine-api", "NAME DESIRED CURRENT READY AVAILABLE AGE myclustername-nc4ast4-gpu-worker-centralus1 1 1 1 1 121m myclustername-worker-centralus1 1 1 1 1 8h myclustername-worker-centralus2 1 1 1 1 8h myclustername-worker-centralus3 1 1 1 1 8h", "oc get machineset -n openshift-machine-api | grep gpu", "myclustername-nc4ast4-gpu-worker-centralus1 1 1 1 1 121m", "oc -n openshift-machine-api get machines | grep gpu", "myclustername-nc4ast4-gpu-worker-centralus1-w9bqn Running Standard_NC4as_T4_v3 centralus 1 21m", "oc get pods -n openshift-nfd", "NAME READY STATUS RESTARTS AGE nfd-controller-manager-8646fcbb65-x5qgk 2/2 Running 7 (8h ago) 1d", "oc get pods -n openshift-nfd", "NAME READY STATUS RESTARTS AGE nfd-controller-manager-8646fcbb65-x5qgk 2/2 Running 7 (8h ago) 12d nfd-master-769656c4cb-w9vrv 1/1 Running 0 12d nfd-worker-qjxb2 1/1 Running 3 (3d14h ago) 12d nfd-worker-xtz9b 1/1 Running 5 (3d14h ago) 12d", "oc describe node ip-10-0-132-138.us-east-2.compute.internal | egrep 'Roles|pci'", "Roles: worker feature.node.kubernetes.io/pci-1013.present=true feature.node.kubernetes.io/pci-10de.present=true feature.node.kubernetes.io/pci-1d0f.present=true", "providerSpec: value: acceleratedNetworking: true 1 vmSize: <azure-vm-size> 2", "oc get infrastructure cluster -o jsonpath='{.status.platform}'", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 machine.openshift.io/cluster-api-machine-role: <role> 2 machine.openshift.io/cluster-api-machine-type: <role> 3 name: <infrastructure_id>-<role>-<region> 4 namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 5 
machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<region> 6 template: metadata: creationTimestamp: null labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 7 machine.openshift.io/cluster-api-machine-role: <role> 8 machine.openshift.io/cluster-api-machine-type: <role> 9 machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<region> 10 spec: metadata: creationTimestamp: null labels: node-role.kubernetes.io/<role>: \"\" 11 providerSpec: value: apiVersion: machine.openshift.io/v1beta1 availabilitySet: <availability_set> 12 credentialsSecret: name: azure-cloud-credentials namespace: openshift-machine-api image: offer: \"\" publisher: \"\" resourceID: /resourceGroups/<infrastructure_id>-rg/providers/Microsoft.Compute/images/<infrastructure_id> 13 sku: \"\" version: \"\" internalLoadBalancer: \"\" kind: AzureMachineProviderSpec location: <region> 14 managedIdentity: <infrastructure_id>-identity 15 metadata: creationTimestamp: null natRule: null networkResourceGroup: \"\" osDisk: diskSizeGB: 128 managedDisk: storageAccountType: Premium_LRS osType: Linux publicIP: false publicLoadBalancer: \"\" resourceGroup: <infrastructure_id>-rg 16 sshPrivateKey: \"\" sshPublicKey: \"\" subnet: <infrastructure_id>-<role>-subnet 17 18 userDataSecret: name: worker-user-data 19 vmSize: Standard_DS4_v2 vnet: <infrastructure_id>-vnet 20 zone: \"1\" 21", "oc get -o jsonpath='{.status.infrastructureName}{\"\\n\"}' infrastructure cluster", "oc -n openshift-machine-api -o jsonpath='{.spec.template.spec.providerSpec.value.subnet}{\"\\n\"}' get machineset/<infrastructure_id>-worker-centralus1", "oc -n openshift-machine-api -o jsonpath='{.spec.template.spec.providerSpec.value.vnet}{\"\\n\"}' get machineset/<infrastructure_id>-worker-centralus1", "oc get machinesets -n openshift-machine-api", "NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m", "oc get machineset <machineset_name> -n openshift-machine-api -o yaml", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 name: <infrastructure_id>-<role> 2 namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machine-role: <role> machine.openshift.io/cluster-api-machine-type: <role> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> spec: providerSpec: 3", "oc create -f <file_name>.yaml", "oc get machineset -n openshift-machine-api", "NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-infra-us-east-1a 1 1 1 1 11m agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: name: machine-set-name spec: template: spec: metadata: labels: cluster-api/accelerator: nvidia-t4 1", "providerSpec: diagnostics: boot: 
storageAccountType: AzureManaged 1", "providerSpec: diagnostics: boot: storageAccountType: CustomerManaged 1 customerManaged: storageAccountURI: https://<storage-account>.blob.core.windows.net 2", "providerSpec: value: osDisk: diskSizeGB: 128 managedDisk: diskEncryptionSet: id: /subscriptions/<subscription_id>/resourceGroups/<resource_group_name>/providers/Microsoft.Compute/diskEncryptionSets/<disk_encryption_set_name> storageAccountType: Premium_LRS", "oc get infrastructure cluster -o jsonpath='{.status.platform}'", "oc get -o jsonpath='{.status.infrastructureName}{\"\\n\"}' infrastructure cluster", "oc -n openshift-machine-api -o jsonpath='{.spec.template.spec.providerSpec.value.disks[0].image}{\"\\n\"}' get machineset/<infrastructure_id>-worker-a", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 name: <infrastructure_id>-w-a namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-w-a template: metadata: creationTimestamp: null labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machine-role: <role> 2 machine.openshift.io/cluster-api-machine-type: <role> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-w-a spec: metadata: labels: node-role.kubernetes.io/<role>: \"\" providerSpec: value: apiVersion: gcpprovider.openshift.io/v1beta1 canIPForward: false credentialsSecret: name: gcp-cloud-credentials deletionProtection: false disks: - autoDelete: true boot: true image: <path_to_image> 3 labels: null sizeGb: 128 type: pd-ssd gcpMetadata: 4 - key: <custom_metadata_key> value: <custom_metadata_value> kind: GCPMachineProviderSpec machineType: n1-standard-4 metadata: creationTimestamp: null networkInterfaces: - network: <infrastructure_id>-network subnetwork: <infrastructure_id>-worker-subnet projectID: <project_name> 5 region: us-central1 serviceAccounts: 6 - email: <infrastructure_id>-w@<project_name>.iam.gserviceaccount.com scopes: - https://www.googleapis.com/auth/cloud-platform tags: - <infrastructure_id>-worker userDataSecret: name: worker-user-data zone: us-central1-a", "oc get machinesets -n openshift-machine-api", "NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m", "oc get machineset <machineset_name> -n openshift-machine-api -o yaml", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 name: <infrastructure_id>-<role> 2 namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machine-role: <role> machine.openshift.io/cluster-api-machine-type: <role> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> spec: providerSpec: 3", "oc create -f <file_name>.yaml", "oc get machineset -n openshift-machine-api", "NAME DESIRED CURRENT READY AVAILABLE AGE 
agl030519-vplxk-infra-us-east-1a 1 1 1 1 11m agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: name: machine-set-name spec: template: spec: metadata: labels: cluster-api/accelerator: nvidia-t4 1", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet spec: template: spec: providerSpec: value: disks: type: <pd-disk-type> 1", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet spec: template: spec: providerSpec: value: confidentialCompute: Enabled 1 onHostMaintenance: Terminate 2 machineType: n2d-standard-8 3", "providerSpec: value: preemptible: true", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet spec: template: spec: providerSpec: value: shieldedInstanceConfig: 1 integrityMonitoring: Enabled 2 secureBoot: Disabled 3 virtualizedTrustedPlatformModule: Enabled 4", "gcloud kms keys add-iam-policy-binding <key_name> --keyring <key_ring_name> --location <key_ring_location> --member \"serviceAccount:service-<project_number>@compute-system.iam.gserviceaccount.com\" --role roles/cloudkms.cryptoKeyEncrypterDecrypter", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet spec: template: spec: providerSpec: value: disks: - type: encryptionKey: kmsKey: name: machine-encryption-key 1 keyRing: openshift-encrpytion-ring 2 location: global 3 projectID: openshift-gcp-project 4 kmsKeyServiceAccount: openshift-service-account@openshift-gcp-project.iam.gserviceaccount.com 5", "providerSpec: value: machineType: a2-highgpu-1g 1 onHostMaintenance: Terminate 2 restartPolicy: Always 3", "providerSpec: value: gpus: - count: 1 1 type: nvidia-tesla-p100 2 machineType: n1-standard-1 3 onHostMaintenance: Terminate 4 restartPolicy: Always 5", "machineType: a2-highgpu-1g onHostMaintenance: Terminate", "{ \"apiVersion\": \"machine.openshift.io/v1beta1\", \"kind\": \"MachineSet\", \"metadata\": { \"annotations\": { \"machine.openshift.io/GPU\": \"0\", \"machine.openshift.io/memoryMb\": \"16384\", \"machine.openshift.io/vCPU\": \"4\" }, \"creationTimestamp\": \"2023-01-13T17:11:02Z\", \"generation\": 1, \"labels\": { \"machine.openshift.io/cluster-api-cluster\": \"myclustername-2pt9p\" }, \"name\": \"myclustername-2pt9p-worker-gpu-a\", \"namespace\": \"openshift-machine-api\", \"resourceVersion\": \"20185\", \"uid\": \"2daf4712-733e-4399-b4b4-d43cb1ed32bd\" }, \"spec\": { \"replicas\": 1, \"selector\": { \"matchLabels\": { \"machine.openshift.io/cluster-api-cluster\": \"myclustername-2pt9p\", \"machine.openshift.io/cluster-api-machineset\": \"myclustername-2pt9p-worker-gpu-a\" } }, \"template\": { \"metadata\": { \"labels\": { \"machine.openshift.io/cluster-api-cluster\": \"myclustername-2pt9p\", \"machine.openshift.io/cluster-api-machine-role\": \"worker\", \"machine.openshift.io/cluster-api-machine-type\": \"worker\", \"machine.openshift.io/cluster-api-machineset\": \"myclustername-2pt9p-worker-gpu-a\" } }, \"spec\": { \"lifecycleHooks\": {}, \"metadata\": {}, \"providerSpec\": { \"value\": { \"apiVersion\": \"machine.openshift.io/v1beta1\", \"canIPForward\": false, \"credentialsSecret\": { \"name\": \"gcp-cloud-credentials\" }, \"deletionProtection\": false, \"disks\": [ { \"autoDelete\": true, \"boot\": true, \"image\": \"projects/rhcos-cloud/global/images/rhcos-412-86-202212081411-0-gcp-x86-64\", \"labels\": 
null, \"sizeGb\": 128, \"type\": \"pd-ssd\" } ], \"kind\": \"GCPMachineProviderSpec\", \"machineType\": \"a2-highgpu-1g\", \"onHostMaintenance\": \"Terminate\", \"metadata\": { \"creationTimestamp\": null }, \"networkInterfaces\": [ { \"network\": \"myclustername-2pt9p-network\", \"subnetwork\": \"myclustername-2pt9p-worker-subnet\" } ], \"preemptible\": true, \"projectID\": \"myteam\", \"region\": \"us-central1\", \"serviceAccounts\": [ { \"email\": \"[email protected]\", \"scopes\": [ \"https://www.googleapis.com/auth/cloud-platform\" ] } ], \"tags\": [ \"myclustername-2pt9p-worker\" ], \"userDataSecret\": { \"name\": \"worker-user-data\" }, \"zone\": \"us-central1-a\" } } } } }, \"status\": { \"availableReplicas\": 1, \"fullyLabeledReplicas\": 1, \"observedGeneration\": 1, \"readyReplicas\": 1, \"replicas\": 1 } }", "oc get nodes", "NAME STATUS ROLES AGE VERSION myclustername-2pt9p-master-0.c.openshift-qe.internal Ready control-plane,master 8h v1.26.0 myclustername-2pt9p-master-1.c.openshift-qe.internal Ready control-plane,master 8h v1.26.0 myclustername-2pt9p-master-2.c.openshift-qe.internal Ready control-plane,master 8h v1.26.0 myclustername-2pt9p-worker-a-mxtnz.c.openshift-qe.internal Ready worker 8h v1.26.0 myclustername-2pt9p-worker-b-9pzzn.c.openshift-qe.internal Ready worker 8h v1.26.0 myclustername-2pt9p-worker-c-6pbg6.c.openshift-qe.internal Ready worker 8h v1.26.0 myclustername-2pt9p-worker-gpu-a-wxcr6.c.openshift-qe.internal Ready worker 4h35m v1.26.0", "oc get machinesets -n openshift-machine-api", "NAME DESIRED CURRENT READY AVAILABLE AGE myclustername-2pt9p-worker-a 1 1 1 1 8h myclustername-2pt9p-worker-b 1 1 1 1 8h myclustername-2pt9p-worker-c 1 1 8h myclustername-2pt9p-worker-f 0 0 8h", "oc get machines -n openshift-machine-api | grep worker", "myclustername-2pt9p-worker-a-mxtnz Running n2-standard-4 us-central1 us-central1-a 8h myclustername-2pt9p-worker-b-9pzzn Running n2-standard-4 us-central1 us-central1-b 8h myclustername-2pt9p-worker-c-6pbg6 Running n2-standard-4 us-central1 us-central1-c 8h", "oc get machineset myclustername-2pt9p-worker-a -n openshift-machine-api -o json > <output_file.json>", "jq .spec.template.spec.providerSpec.value.machineType ocp_4.13_machineset-a2-highgpu-1g.json \"a2-highgpu-1g\"", "\"machineType\": \"a2-highgpu-1g\", \"onHostMaintenance\": \"Terminate\",", "oc get machineset/myclustername-2pt9p-worker-a -n openshift-machine-api -o json | diff ocp_4.13_machineset-a2-highgpu-1g.json -", "15c15 < \"name\": \"myclustername-2pt9p-worker-gpu-a\", --- > \"name\": \"myclustername-2pt9p-worker-a\", 25c25 < \"machine.openshift.io/cluster-api-machineset\": \"myclustername-2pt9p-worker-gpu-a\" --- > \"machine.openshift.io/cluster-api-machineset\": \"myclustername-2pt9p-worker-a\" 34c34 < \"machine.openshift.io/cluster-api-machineset\": \"myclustername-2pt9p-worker-gpu-a\" --- > \"machine.openshift.io/cluster-api-machineset\": \"myclustername-2pt9p-worker-a\" 59,60c59 < \"machineType\": \"a2-highgpu-1g\", < \"onHostMaintenance\": \"Terminate\", --- > \"machineType\": \"n2-standard-4\",", "oc create -f ocp_4.13_machineset-a2-highgpu-1g.json", "machineset.machine.openshift.io/myclustername-2pt9p-worker-gpu-a created", "oc -n openshift-machine-api get machinesets | grep gpu", "myclustername-2pt9p-worker-gpu-a 1 1 1 1 5h24m", "oc -n openshift-machine-api get machines | grep gpu", "myclustername-2pt9p-worker-gpu-a-wxcr6 Running a2-highgpu-1g us-central1 us-central1-a 5h25m", "oc get pods -n openshift-nfd", "NAME READY STATUS RESTARTS AGE 
nfd-controller-manager-8646fcbb65-x5qgk 2/2 Running 7 (8h ago) 1d", "oc get pods -n openshift-nfd", "NAME READY STATUS RESTARTS AGE nfd-controller-manager-8646fcbb65-x5qgk 2/2 Running 7 (8h ago) 12d nfd-master-769656c4cb-w9vrv 1/1 Running 0 12d nfd-worker-qjxb2 1/1 Running 3 (3d14h ago) 12d nfd-worker-xtz9b 1/1 Running 5 (3d14h ago) 12d", "oc describe node ip-10-0-132-138.us-east-2.compute.internal | egrep 'Roles|pci'", "Roles: worker feature.node.kubernetes.io/pci-1013.present=true feature.node.kubernetes.io/pci-10de.present=true feature.node.kubernetes.io/pci-1d0f.present=true", "oc get infrastructure cluster -o jsonpath='{.status.platform}'", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 machine.openshift.io/cluster-api-machine-role: <role> 2 machine.openshift.io/cluster-api-machine-type: <role> 3 name: <infrastructure_id>-<role>-<region> 4 namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 5 machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<region> 6 template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 7 machine.openshift.io/cluster-api-machine-role: <role> 8 machine.openshift.io/cluster-api-machine-type: <role> 9 machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<region> 10 spec: metadata: labels: node-role.kubernetes.io/<role>: \"\" providerSpec: value: apiVersion: ibmcloudproviderconfig.openshift.io/v1beta1 credentialsSecret: name: ibmcloud-credentials image: <infrastructure_id>-rhcos 11 kind: IBMCloudMachineProviderSpec primaryNetworkInterface: securityGroups: - <infrastructure_id>-sg-cluster-wide - <infrastructure_id>-sg-openshift-net subnet: <infrastructure_id>-subnet-compute-<zone> 12 profile: <instance_profile> 13 region: <region> 14 resourceGroup: <resource_group> 15 userDataSecret: name: <role>-user-data 16 vpc: <vpc_name> 17 zone: <zone> 18", "oc get -o jsonpath='{.status.infrastructureName}{\"\\n\"}' infrastructure cluster", "oc get machinesets -n openshift-machine-api", "NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m", "oc get machineset <machineset_name> -n openshift-machine-api -o yaml", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 name: <infrastructure_id>-<role> 2 namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machine-role: <role> machine.openshift.io/cluster-api-machine-type: <role> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> spec: providerSpec: 3", "oc create -f <file_name>.yaml", "oc get machineset -n openshift-machine-api", "NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-infra-us-east-1a 1 1 1 1 11m agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m 
agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: name: machine-set-name spec: template: spec: metadata: labels: cluster-api/accelerator: nvidia-t4 1", "oc get infrastructure cluster -o jsonpath='{.status.platform}'", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 machine.openshift.io/cluster-api-machine-role: <role> 2 machine.openshift.io/cluster-api-machine-type: <role> 3 name: <infrastructure_id>-<role>-<region> 4 namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 5 machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<region> 6 template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 7 machine.openshift.io/cluster-api-machine-role: <role> 8 machine.openshift.io/cluster-api-machine-type: <role> 9 machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<region> 10 spec: metadata: labels: node-role.kubernetes.io/<role>: \"\" providerSpec: value: apiVersion: machine.openshift.io/v1 credentialsSecret: name: powervs-credentials image: name: rhcos-<infrastructure_id> 11 type: Name keyPairName: <infrastructure_id>-key kind: PowerVSMachineProviderConfig memoryGiB: 32 network: regex: ^DHCPSERVER[0-9a-z]{32}_PrivateUSD type: RegEx processorType: Shared processors: \"0.5\" serviceInstance: id: <ibm_power_vs_service_instance_id> type: ID 12 systemType: s922 userDataSecret: name: <role>-user-data", "oc get -o jsonpath='{.status.infrastructureName}{\"\\n\"}' infrastructure cluster", "oc get machinesets -n openshift-machine-api", "NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m", "oc get machineset <machineset_name> -n openshift-machine-api -o yaml", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 name: <infrastructure_id>-<role> 2 namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machine-role: <role> machine.openshift.io/cluster-api-machine-type: <role> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> spec: providerSpec: 3", "oc create -f <file_name>.yaml", "oc get machineset -n openshift-machine-api", "NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-infra-us-east-1a 1 1 1 1 11m agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: name: machine-set-name spec: template: spec: metadata: labels: cluster-api/accelerator: nvidia-t4 1", "oc get infrastructure cluster -o 
jsonpath='{.status.platform}'", "oc get -o jsonpath='{.status.infrastructureName}{\"\\n\"}' infrastructure cluster", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 machine.openshift.io/cluster-api-machine-role: <role> 2 machine.openshift.io/cluster-api-machine-type: <role> name: <infrastructure_id>-<role>-<zone> 3 namespace: openshift-machine-api annotations: 4 machine.openshift.io/memoryMb: \"16384\" machine.openshift.io/vCPU: \"4\" spec: replicas: 3 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<zone> template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machine-role: <role> machine.openshift.io/cluster-api-machine-type: <role> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role>-<zone> spec: metadata: labels: node-role.kubernetes.io/<role>: \"\" providerSpec: value: apiVersion: machine.openshift.io/v1 bootType: \"\" 5 categories: 6 - key: <category_name> value: <category_value> cluster: 7 type: uuid uuid: <cluster_uuid> credentialsSecret: name: nutanix-credentials image: name: <infrastructure_id>-rhcos 8 type: name kind: NutanixMachineProviderConfig memorySize: 16Gi 9 project: 10 type: name name: <project_name> subnets: - type: uuid uuid: <subnet_uuid> systemDiskSize: 120Gi 11 userDataSecret: name: <user_data_secret> 12 vcpuSockets: 4 13 vcpusPerSocket: 1 14", "oc get machinesets -n openshift-machine-api", "NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m", "oc get machineset <machineset_name> -n openshift-machine-api -o yaml", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 name: <infrastructure_id>-<role> 2 namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machine-role: <role> machine.openshift.io/cluster-api-machine-type: <role> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> spec: providerSpec: 3", "oc create -f <file_name>.yaml", "oc get machineset -n openshift-machine-api", "NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-infra-us-east-1a 1 1 1 1 11m agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: name: machine-set-name spec: template: spec: metadata: labels: cluster-api/accelerator: nvidia-t4 1", "oc get infrastructure cluster -o jsonpath='{.status.platform}'", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 machine.openshift.io/cluster-api-machine-role: <role> 2 
machine.openshift.io/cluster-api-machine-type: <role> 3 name: <infrastructure_id>-<role> 4 namespace: openshift-machine-api spec: replicas: <number_of_replicas> selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 5 machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> 6 template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 7 machine.openshift.io/cluster-api-machine-role: <role> 8 machine.openshift.io/cluster-api-machine-type: <role> 9 machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> 10 spec: providerSpec: value: apiVersion: openstackproviderconfig.openshift.io/v1alpha1 cloudName: openstack cloudsSecret: name: openstack-cloud-credentials namespace: openshift-machine-api flavor: <nova_flavor> image: <glance_image_name_or_location> serverGroupID: <optional_UUID_of_server_group> 11 kind: OpenstackProviderSpec networks: 12 - filter: {} subnets: - filter: name: <subnet_name> tags: openshiftClusterID=<infrastructure_id> 13 primarySubnet: <rhosp_subnet_UUID> 14 securityGroups: - filter: {} name: <infrastructure_id>-worker 15 serverMetadata: Name: <infrastructure_id>-worker 16 openshiftClusterID: <infrastructure_id> 17 tags: - openshiftClusterID=<infrastructure_id> 18 trunk: true userDataSecret: name: worker-user-data 19 availabilityZone: <optional_openstack_availability_zone>", "oc get -o jsonpath='{.status.infrastructureName}{\"\\n\"}' infrastructure cluster", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machine-role: <node_role> machine.openshift.io/cluster-api-machine-type: <node_role> name: <infrastructure_id>-<node_role> namespace: openshift-machine-api spec: replicas: <number_of_replicas> selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<node_role> template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machine-role: <node_role> machine.openshift.io/cluster-api-machine-type: <node_role> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<node_role> spec: metadata: providerSpec: value: apiVersion: openstackproviderconfig.openshift.io/v1alpha1 cloudName: openstack cloudsSecret: name: openstack-cloud-credentials namespace: openshift-machine-api flavor: <nova_flavor> image: <glance_image_name_or_location> serverGroupID: <optional_UUID_of_server_group> kind: OpenstackProviderSpec networks: - subnets: - UUID: <machines_subnet_UUID> ports: - networkID: <radio_network_UUID> 1 nameSuffix: radio fixedIPs: - subnetID: <radio_subnet_UUID> 2 tags: - sriov - radio vnicType: direct 3 portSecurity: false 4 - networkID: <uplink_network_UUID> 5 nameSuffix: uplink fixedIPs: - subnetID: <uplink_subnet_UUID> 6 tags: - sriov - uplink vnicType: direct 7 portSecurity: false 8 primarySubnet: <machines_subnet_UUID> securityGroups: - filter: {} name: <infrastructure_id>-<node_role> serverMetadata: Name: <infrastructure_id>-<node_role> openshiftClusterID: <infrastructure_id> tags: - openshiftClusterID=<infrastructure_id> trunk: true userDataSecret: name: <node_role>-user-data availabilityZone: <optional_openstack_availability_zone>", "oc label node <NODE_NAME> feature.node.kubernetes.io/network-sriov.capable=\"true\"", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: 
machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machine-role: <node_role> machine.openshift.io/cluster-api-machine-type: <node_role> name: <infrastructure_id>-<node_role> namespace: openshift-machine-api spec: replicas: <number_of_replicas> selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<node_role> template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machine-role: <node_role> machine.openshift.io/cluster-api-machine-type: <node_role> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<node_role> spec: metadata: {} providerSpec: value: apiVersion: openstackproviderconfig.openshift.io/v1alpha1 cloudName: openstack cloudsSecret: name: openstack-cloud-credentials namespace: openshift-machine-api flavor: <nova_flavor> image: <glance_image_name_or_location> kind: OpenstackProviderSpec ports: - allowedAddressPairs: 1 - ipAddress: <API_VIP_port_IP> - ipAddress: <ingress_VIP_port_IP> fixedIPs: - subnetID: <machines_subnet_UUID> 2 nameSuffix: nodes networkID: <machines_network_UUID> 3 securityGroups: - <compute_security_group_UUID> 4 - networkID: <SRIOV_network_UUID> nameSuffix: sriov fixedIPs: - subnetID: <SRIOV_subnet_UUID> tags: - sriov vnicType: direct portSecurity: False primarySubnet: <machines_subnet_UUID> serverMetadata: Name: <infrastructure_ID>-<node_role> openshiftClusterID: <infrastructure_id> tags: - openshiftClusterID=<infrastructure_id> trunk: false userDataSecret: name: worker-user-data", "networks: - subnets: - uuid: <machines_subnet_UUID> portSecurityEnabled: false portSecurityEnabled: false securityGroups: []", "openstack port set --enable-port-security --security-group <infrastructure_id>-<node_role> <main_port_ID>", "oc label node <NODE_NAME> feature.node.kubernetes.io/network-sriov.capable=\"true\"", "oc get machinesets -n openshift-machine-api", "NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m", "oc get machineset <machineset_name> -n openshift-machine-api -o yaml", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 name: <infrastructure_id>-<role> 2 namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machine-role: <role> machine.openshift.io/cluster-api-machine-type: <role> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> spec: providerSpec: 3", "oc create -f <file_name>.yaml", "oc get machineset -n openshift-machine-api", "NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-infra-us-east-1a 1 1 1 1 11m agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m", "apiVersion: machine.openshift.io/v1beta1 
kind: MachineSet metadata: name: machine-set-name spec: template: spec: metadata: labels: cluster-api/accelerator: nvidia-t4 1", "oc get infrastructure cluster -o jsonpath='{.status.platform}'", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 machine.openshift.io/cluster-api-machine-role: <role> 2 machine.openshift.io/cluster-api-machine-type: <role> 3 name: <infrastructure_id>-<role> 4 namespace: openshift-machine-api spec: replicas: <number_of_replicas> 5 Selector: 6 matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 7 machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> 8 template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 9 machine.openshift.io/cluster-api-machine-role: <role> 10 machine.openshift.io/cluster-api-machine-type: <role> 11 machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> 12 spec: metadata: labels: node-role.kubernetes.io/<role>: \"\" 13 providerSpec: value: apiVersion: ovirtproviderconfig.machine.openshift.io/v1beta1 cluster_id: <ovirt_cluster_id> 14 template_name: <ovirt_template_name> 15 sparse: <boolean_value> 16 format: <raw_or_cow> 17 cpu: 18 sockets: <number_of_sockets> 19 cores: <number_of_cores> 20 threads: <number_of_threads> 21 memory_mb: <memory_size> 22 guaranteed_memory_mb: <memory_size> 23 os_disk: 24 size_gb: <disk_size> 25 storage_domain_id: <storage_domain_UUID> 26 network_interfaces: 27 vnic_profile_id: <vnic_profile_id> 28 credentialsSecret: name: ovirt-credentials 29 kind: OvirtMachineProviderSpec type: <workload_type> 30 auto_pinning_policy: <auto_pinning_policy> 31 hugepages: <hugepages> 32 affinityGroupsNames: - compute 33 userDataSecret: name: worker-user-data", "oc get -o jsonpath='{.status.infrastructureName}{\"\\n\"}' infrastructure cluster", "oc get machinesets -n openshift-machine-api", "NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m", "oc get machineset <machineset_name> -n openshift-machine-api -o yaml", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 name: <infrastructure_id>-<role> 2 namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machine-role: <role> machine.openshift.io/cluster-api-machine-type: <role> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> spec: providerSpec: 3", "oc create -f <file_name>.yaml", "oc get machineset -n openshift-machine-api", "NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-infra-us-east-1a 1 1 1 1 11m agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m", "oc get infrastructure cluster -o jsonpath='{.status.platform}'", "apiVersion: 
machine.openshift.io/v1beta1 kind: MachineSet metadata: creationTimestamp: null labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 name: <infrastructure_id>-<role> 2 namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 3 machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> 4 template: metadata: creationTimestamp: null labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 5 machine.openshift.io/cluster-api-machine-role: <role> 6 machine.openshift.io/cluster-api-machine-type: <role> 7 machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> 8 spec: metadata: creationTimestamp: null labels: node-role.kubernetes.io/<role>: \"\" 9 providerSpec: value: apiVersion: vsphereprovider.openshift.io/v1beta1 credentialsSecret: name: vsphere-cloud-credentials diskGiB: 120 kind: VSphereMachineProviderSpec memoryMiB: 8192 metadata: creationTimestamp: null network: devices: - networkName: \"<vm_network_name>\" 10 numCPUs: 4 numCoresPerSocket: 1 snapshot: \"\" template: <vm_template_name> 11 userDataSecret: name: worker-user-data workspace: datacenter: <vcenter_datacenter_name> 12 datastore: <vcenter_datastore_name> 13 folder: <vcenter_vm_folder_path> 14 resourcepool: <vsphere_resource_pool> 15 server: <vcenter_server_ip> 16", "oc get -o jsonpath='{.status.infrastructureName}{\"\\n\"}' infrastructure cluster", "oc get infrastructure cluster -o jsonpath='{.status.infrastructureName}'", "oc get secret -n openshift-machine-api vsphere-cloud-credentials -o go-template='{{range USDk,USDv := .data}}{{printf \"%s: \" USDk}}{{if not USDv}}{{USDv}}{{else}}{{USDv | base64decode}}{{end}}{{\"\\n\"}}{{end}}'", "<vcenter-server>.password=<openshift-user-password> <vcenter-server>.username=<openshift-user>", "oc create secret generic vsphere-cloud-credentials -n openshift-machine-api --from-literal=<vcenter-server>.username=<openshift-user> --from-literal=<vcenter-server>.password=<openshift-user-password>", "oc get secret -n openshift-machine-api worker-user-data -o go-template='{{range USDk,USDv := .data}}{{printf \"%s: \" USDk}}{{if not USDv}}{{USDv}}{{else}}{{USDv | base64decode}}{{end}}{{\"\\n\"}}{{end}}'", "disableTemplating: false userData: 1 { \"ignition\": { }, }", "oc create secret generic worker-user-data -n openshift-machine-api --from-file=<installation_directory>/worker.ign", "oc get machinesets -n openshift-machine-api", "NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m", "oc get machineset <machineset_name> -n openshift-machine-api -o yaml", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 name: <infrastructure_id>-<role> 2 namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machine-role: <role> machine.openshift.io/cluster-api-machine-type: <role> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> spec: providerSpec: 
3", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet template: spec: providerSpec: value: apiVersion: machine.openshift.io/v1beta1 credentialsSecret: name: vsphere-cloud-credentials 1 diskGiB: 120 kind: VSphereMachineProviderSpec memoryMiB: 16384 network: devices: - networkName: \"<vm_network_name>\" numCPUs: 4 numCoresPerSocket: 4 snapshot: \"\" template: <vm_template_name> 2 userDataSecret: name: worker-user-data 3 workspace: datacenter: <vcenter_datacenter_name> datastore: <vcenter_datastore_name> folder: <vcenter_vm_folder_path> resourcepool: <vsphere_resource_pool> server: <vcenter_server_address> 4", "oc create -f <file_name>.yaml", "oc get machineset -n openshift-machine-api", "NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-infra-us-east-1a 1 1 1 1 11m agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: name: machine-set-name spec: template: spec: metadata: labels: cluster-api/accelerator: nvidia-t4 1", "oc get infrastructure cluster -o jsonpath='{.status.platform}'", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: creationTimestamp: null labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 name: <infrastructure_id>-<role> 2 namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 3 machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> 4 template: metadata: creationTimestamp: null labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 5 machine.openshift.io/cluster-api-machine-role: <role> 6 machine.openshift.io/cluster-api-machine-type: <role> 7 machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> 8 spec: metadata: creationTimestamp: null labels: node-role.kubernetes.io/<role>: \"\" 9 providerSpec: value: apiVersion: baremetal.cluster.k8s.io/v1alpha1 hostSelector: {} image: checksum: http:/172.22.0.3:6181/images/rhcos-<version>.<architecture>.qcow2.<md5sum> 10 url: http://172.22.0.3:6181/images/rhcos-<version>.<architecture>.qcow2 11 kind: BareMetalMachineProviderSpec metadata: creationTimestamp: null userData: name: worker-user-data", "oc get -o jsonpath='{.status.infrastructureName}{\"\\n\"}' infrastructure cluster", "oc get machinesets -n openshift-machine-api", "NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m", "oc get machineset <machineset_name> -n openshift-machine-api -o yaml", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> 1 name: <infrastructure_id>-<role> 2 namespace: openshift-machine-api spec: replicas: 1 selector: matchLabels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> template: metadata: labels: machine.openshift.io/cluster-api-cluster: <infrastructure_id> machine.openshift.io/cluster-api-machine-role: <role> machine.openshift.io/cluster-api-machine-type: <role> 
machine.openshift.io/cluster-api-machineset: <infrastructure_id>-<role> spec: providerSpec: 3", "oc create -f <file_name>.yaml", "oc get machineset -n openshift-machine-api", "NAME DESIRED CURRENT READY AVAILABLE AGE agl030519-vplxk-infra-us-east-1a 1 1 1 1 11m agl030519-vplxk-worker-us-east-1a 1 1 1 1 55m agl030519-vplxk-worker-us-east-1b 1 1 1 1 55m agl030519-vplxk-worker-us-east-1c 1 1 1 1 55m agl030519-vplxk-worker-us-east-1d 0 0 55m agl030519-vplxk-worker-us-east-1e 0 0 55m agl030519-vplxk-worker-us-east-1f 0 0 55m", "apiVersion: machine.openshift.io/v1beta1 kind: MachineSet metadata: name: machine-set-name spec: template: spec: metadata: labels: cluster-api/accelerator: nvidia-t4 1" ]
https://docs.redhat.com/en/documentation/openshift_container_platform/4.13/html/machine_management/managing-compute-machines-with-the-machine-api
Chapter 1. Planning for automation mesh in your VM-based Red Hat Ansible Automation Platform environment
Chapter 1. Planning for automation mesh in your VM-based Red Hat Ansible Automation Platform environment The following topics contain information to help plan an automation mesh deployment in your VM-based Ansible Automation Platform environment. The subsequent sections explain the concepts that comprise automation mesh in addition to providing examples on how you can design automation mesh topologies. Simple to complex topology examples are included to illustrate the various ways you can deploy automation mesh. 1.1. About automation mesh Automation mesh is an overlay network intended to ease the distribution of work across a large and dispersed collection of workers through nodes that establish peer-to-peer connections with each other using existing networks. Red Hat Ansible Automation Platform 2 replaces Ansible Tower and isolated nodes with automation controller and automation hub. Automation controller provides the control plane for automation through its UI, RESTful API, RBAC, workflows and CI/CD integration, while automation mesh can be used for setting up, discovering, changing or modifying the nodes that form the control and execution layers. Automation mesh uses TLS encryption for communication, so traffic that traverses external networks (the internet or other) is encrypted in transit. Automation mesh introduces: Dynamic cluster capacity that scales independently, enabling you to create, register, group, ungroup and deregister nodes with minimal downtime. Control and execution plane separation that enables you to scale playbook execution capacity independently from control plane capacity. Deployment choices that are resilient to latency, reconfigurable without outage, and that dynamically re-route to choose a different path when outages occur. Connectivity that includes bi-directional, multi-hopped mesh communication possibilities which are Federal Information Processing Standards (FIPS) compliant. 1.2. Control and execution planes Automation mesh makes use of unique node types to create both the control and execution plane. Learn more about the control and execution plane and their node types before designing your automation mesh topology. 1.2.1. Control plane The control plane consists of hybrid and control nodes. Instances in the control plane run persistent automation controller services such as the web server and task dispatcher, in addition to project updates, and management jobs. Hybrid nodes - this is the default node type for control plane nodes, responsible for automation controller runtime functions like project updates, management jobs and ansible-runner task operations. Hybrid nodes are also used for automation execution. Control nodes - control nodes run project and inventory updates and system jobs, but not regular jobs. Execution capabilities are disabled on these nodes. 1.2.2. Execution plane The execution plane consists of execution nodes that execute automation on behalf of the control plane and have no control functions. Hop nodes serve to communicate. Nodes in the execution plane only run user-space jobs, and may be geographically separated, with high latency, from the control plane. Execution nodes - Execution nodes run jobs under ansible-runner with podman isolation. This node type is similar to isolated nodes. This is the default node type for execution plane nodes. Hop nodes - similar to a jump host, hop nodes route traffic to other execution nodes. Hop nodes cannot execute automation. 1.2.3. Peers Peer relationships define node-to-node connections.
You can define peers within the [automationcontroller] and [execution_nodes] groups or using the [automationcontroller:vars] or [execution_nodes:vars] groups 1.2.4. Defining automation mesh node types The examples in this section demonstrate how to set the node type for the hosts in your inventory file. You can set the node_type for single nodes in the control plane or execution plane inventory groups. To define the node type for an entire group of nodes, set the node_type in the vars stanza for the group. The permitted values for node_type in the control plane [automationcontroller] group are hybrid (default) and control . The permitted values for node_type in the [execution_nodes] group are execution (default) and hop . Hybrid node The following inventory consists of a single hybrid node in the control plane: [automationcontroller] control-plane-1.example.com Control node The following inventory consists of a single control node in the control plane: [automationcontroller] control-plane-1.example.com node_type=control If you set node_type to control in the vars stanza for the control plane nodes, then all of the nodes in control plane are control nodes. [automationcontroller] control-plane-1.example.com [automationcontroller:vars] node_type=control Execution node The following stanza defines a single execution node in the execution plane: [execution_nodes] execution-plane-1.example.com Hop node The following stanza defines a single hop node and an execution node in the execution plane. The node_type variable is set for every individual node. [execution_nodes] execution-plane-1.example.com node_type=hop execution-plane-2.example.com If you want to set the node_type at the group level, you must create separate groups for the execution nodes and the hop nodes. [execution_nodes] execution-plane-1.example.com execution-plane-2.example.com [execution_group] execution-plane-2.example.com [execution_group:vars] node_type=execution [hop_group] execution-plane-1.example.com [hop_group:vars] node_type=hop Peer connections Create node-to-node connections using the peers= host variable. The following example connects control-plane-1.example.com to execution-node-1.example.com and execution-node-1.example.com to execution-node-2.example.com : [automationcontroller] control-plane-1.example.com peers=execution-node-1.example.com [automationcontroller:vars] node_type=control [execution_nodes] execution-node-1.example.com peers=execution-node-2.example.com execution-node-2.example.com Additional resources See the example automation mesh topologies in this guide for more examples of how to implement mesh nodes.
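The inventory stanzas above peer control nodes directly to execution nodes. As an illustrative sketch only (the hostnames, and the combination of node_type and peers on a single host line, are assumptions rather than examples taken from this guide), the same peers= host variable can route a control node to an execution node through an intermediate hop node:

[automationcontroller]
control-plane-1.example.com node_type=control peers=hop-node-1.example.com

[execution_nodes]
hop-node-1.example.com node_type=hop peers=execution-node-1.example.com
execution-node-1.example.com node_type=execution

With this layout, work dispatched by the control node reaches execution-node-1.example.com only through the hop node, which routes traffic but never runs automation itself.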
[ "[automationcontroller] control-plane-1.example.com", "[automationcontroller] control-plane-1.example.com node_type=control", "[automationcontroller] control-plane-1.example.com [automationcontroller:vars] node_type=control", "[execution_nodes] execution-plane-1.example.com", "[execution_nodes] execution-plane-1.example.com node_type=hop execution-plane-2.example.com", "[execution_nodes] execution-plane-1.example.com execution-plane-2.example.com [execution_group] execution-plane-2.example.com [execution_group:vars] node_type=execution [hop_group] execution-plane-1.example.com [hop_group:vars] node_type=hop", "[automationcontroller] control-plane-1.example.com peers=execution-node-1.example.com [automationcontroller:vars] node_type=control [execution_nodes] execution-node-1.example.com peers=execution-node-2.example.com execution-node-2.example.com" ]
https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.4/html/red_hat_ansible_automation_platform_automation_mesh_guide_for_vm-based_installations/assembly-planning-mesh
4.6. Red Hat Enterprise Linux-Specific Information
4.6. Red Hat Enterprise Linux-Specific Information Due to the inherent complexity of being a demand-paged virtual memory operating system, monitoring memory-related resources under Red Hat Enterprise Linux can be confusing. Therefore, it is best to start with the more straightforward tools, and work from there. Using free , it is possible to get a concise (if somewhat simplistic) overview of memory and swap utilization. Here is an example: We note that this system has 1.2GB of RAM, of which only about 350MB is actually in use. As expected for a system with this much free RAM, none of the 500MB swap partition is in use. Contrast that example with this one: This system has about 256MB of RAM, the majority of which is in use, leaving only about 8MB free. Over 100MB of the 512MB swap partition is in use. Although this system is certainly more limited in terms of memory than the first system, to determine if this memory limitation is causing performance problems we must dig a bit deeper. Although more cryptic than free , vmstat has the benefit of displaying more than memory utilization statistics. Here is the output from vmstat 1 10 : During this 10-second sample, the amount of free memory (the free field) varies somewhat, and there is a bit of swap-related I/O (the si and so fields), but overall this system is running well. It is doubtful, however, how much additional workload it could handle, given the current memory utilization. When researching memory-related issues, it is often necessary to determine how the Red Hat Enterprise Linux virtual memory subsystem is making use of system memory. By using sar , it is possible to examine this aspect of system performance in much more detail. By reviewing the sar -r report, we can examine memory and swap utilization more closely: The kbmemfree and kbmemused fields show the typical free and used memory statistics, with the percentage of memory used displayed in the %memused field. The kbbuffers and kbcached fields show how many kilobytes of memory are allocated to buffers and the system-wide data cache. The kbmemshrd field is always zero for systems (such as Red Hat Enterprise Linux) using the 2.4 Linux kernel. The lines for this report have been truncated to fit on the page. Here is the remainder of each line, with the timestamp added to the left to make reading easier: For swap utilization, the kbswpfree and kbswpused fields show the amount of free and used swap space, in kilobytes, with the %swpused field showing the swap space used as a percentage. To learn more about the swapping activity taking place, use the sar -W report. Here is an example: Here we notice that, on average, there were three times fewer pages being brought in from swap ( pswpin/s ) as there were going out to swap ( pswpout/s ). To better understand how pages are being used, refer to the sar -B report: Here we can determine how many blocks per second are paged in from disk ( pgpgin/s ) and paged out to disk ( pgpgout/s ). These statistics serve as a barometer of overall virtual memory activity. However, more knowledge can be gained by examining the other fields in this report. The Red Hat Enterprise Linux kernel marks all pages as either active or inactive. As the names imply, active pages are currently in use in some manner (as process or buffer pages, for example), while inactive pages are not. This example report shows that the list of active pages (the activepg field) averages approximately 660MB [13] . 
The remainder of the fields in this report concentrate on the inactive list -- pages that, for one reason or another, have not recently been used. The inadtypg field shows how many inactive pages are dirty (modified) and may need to be written to disk. The inaclnpg field, on the other hand, shows how many inactive pages are clean (unmodified) and do not need to be written to disk. The inatarpg field represents the desired size of the inactive list. This value is calculated by the Linux kernel and is sized such that the inactive list remains large enough to act as a pool for page replacement purposes. For additional insight into page status (specifically, how often pages change status), use the sar -R report. Here is a sample report: The statistics in this particular sar report are unique, in that they may be positive, negative, or zero. When positive, the value indicates the rate at which pages of this type are increasing. When negative, the value indicates the rate at which pages of this type are decreasing. A value of zero indicates that pages of this type are neither increasing or decreasing. In this example, the last sample shows slightly over three pages per second being allocated from the list of free pages (the frmpg/s field) and nearly 1 page per second added to the page cache (the campg/s field). The list of pages used as buffers (the bufpg/s field) gained approximately one page every two seconds, while the shared memory page list (the shmpg/s field) neither gained nor lost any pages. [13] The page size under Red Hat Enterprise Linux on the x86 system used in this example is 4096 bytes. Systems based on other architectures may have different page sizes.
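The reports discussed in this section come from the standard procps and sysstat tools. As a brief sketch (not part of the original text; output differs per system, and the historical sar data shown above assumes the sysstat data collector is already running), they can be generated with:

free           # concise overview of memory and swap utilization
vmstat 1 10    # ten one-second samples of memory, swap, I/O, system, and CPU activity
sar -r         # memory and swap utilization over time (kbmemfree, kbmemused, kbswpfree, ...)
sar -W         # swapping activity (pswpin/s, pswpout/s)
sar -B         # paging activity (pgpgin/s, pgpgout/s, activepg, inadtypg, inaclnpg, inatarpg)
sar -R         # rates of change in page status (frmpg/s, shmpg/s, bufpg/s, campg/s)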
[ "total used free shared buffers cached Mem: 1288720 361448 927272 0 27844 187632 -/+ buffers/cache: 145972 1142748 Swap: 522104 0 522104", "total used free shared buffers cached Mem: 255088 246604 8484 0 6492 111320 -/+ buffers/cache: 128792 126296 Swap: 530136 111308 418828", "procs -----------memory---------- ---swap-- -----io---- --system-- ----cpu---- r b swpd free buff cache si so bi bo in cs us sy id wa 2 0 111304 9728 7036 107204 0 0 6 10 120 24 10 2 89 0 2 0 111304 9728 7036 107204 0 0 0 0 526 1653 96 4 0 0 1 0 111304 9616 7036 107204 0 0 0 0 552 2219 94 5 1 0 1 0 111304 9616 7036 107204 0 0 0 0 624 699 98 2 0 0 2 0 111304 9616 7052 107204 0 0 0 48 603 1466 95 5 0 0 3 0 111304 9620 7052 107204 0 0 0 0 768 932 90 4 6 0 3 0 111304 9440 7076 107360 92 0 244 0 820 1230 85 9 6 0 2 0 111304 9276 7076 107368 0 0 0 0 832 1060 87 6 7 0 3 0 111304 9624 7092 107372 0 0 16 0 813 1655 93 5 2 0 2 0 111304 9624 7108 107372 0 0 0 972 1189 1165 68 9 23 0", "Linux 2.4.20-1.1931.2.231.2.10.ent (pigdog.example.com) 07/22/2003 12:00:01 AM kbmemfree kbmemused %memused kbmemshrd kbbuffers kbcached 12:10:00 AM 240468 1048252 81.34 0 133724 485772 12:20:00 AM 240508 1048212 81.34 0 134172 485600 ... 08:40:00 PM 934132 354588 27.51 0 26080 185364 Average: 324346 964374 74.83 0 96072 467559", "12:00:01 AM kbswpfree kbswpused %swpused 12:10:00 AM 522104 0 0.00 12:20:00 AM 522104 0 0.00 ... 08:40:00 PM 522104 0 0.00 Average: 522104 0 0.00", "Linux 2.4.20-1.1931.2.231.2.10.entsmp (raptor.example.com) 07/22/2003 12:00:01 AM pswpin/s pswpout/s 12:10:01 AM 0.15 2.56 12:20:00 AM 0.00 0.00 ... 03:30:01 PM 0.42 2.56 Average: 0.11 0.37", "Linux 2.4.20-1.1931.2.231.2.10.entsmp (raptor.example.com) 07/22/2003 12:00:01 AM pgpgin/s pgpgout/s activepg inadtypg inaclnpg inatarpg 12:10:00 AM 0.03 8.61 195393 20654 30352 49279 12:20:00 AM 0.01 7.51 195385 20655 30336 49275 ... 08:40:00 PM 0.00 7.79 71236 1371 6760 15873 Average: 201.54 201.54 169367 18999 35146 44702", "Linux 2.4.20-1.1931.2.231.2.10.entsmp (raptor.example.com) 07/22/2003 12:00:01 AM frmpg/s shmpg/s bufpg/s campg/s 12:10:00 AM -0.10 0.00 0.12 -0.07 12:20:00 AM 0.02 0.00 0.19 -0.07 ... 08:50:01 PM -3.19 0.00 0.46 0.81 Average: 0.01 0.00 -0.00 -0.00" ]
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/4/html/introduction_to_system_administration/s1-memory-rhlspec
Chapter 59. Defining data tables for guided rule templates
Chapter 59. Defining data tables for guided rule templates After you create a guided rule template and add template keys for field values, a data table is displayed in the Data table of the guided rule templates designer. Each column in the data table corresponds to a template key that you added in the guided rule template. Use this table to define values for each template key row by row. Each row of values that you define in the data table for that template results in a rule. Procedure In the guided rule templates designer, click the Data tab to view the data table. Each column in the data table corresponds to a template key that you added in the guided rule template. Note If you did not add any template keys to the rule template, then this data table does not appear and the template does not function as a genuine template but essentially as an individual guided rule. For this reason, template keys are fundamental in creating guided rule templates. Click Add row and define the data values for each template key column to generate that rule (row). Continue adding rows and defining data values for each rule that will be generated. You can click Add row for each new row, or click the plus icon ( ) or minus icon to add or remove rows. Figure 59.1. Sample data table for a guided rule template To view the DRL code, click the Source tab in the guided rule templates designer. Example: As a visual aid, click the grid icon in the upper-left corner of the data table to toggle cell merging on and off, if needed. Cells in the same column with identical values are merged into a single cell. Figure 59.2. Merge cells in a data table You can then use the expand/collapse icon [+/-] in the upper-left corner of each newly merged cell to collapse the rows corresponding to the merged cell, or to re-expand the collapsed rows. Figure 59.3. Collapse merged cells After you define the template key data for all rules and adjust the table display as needed, click Validate in the upper-right toolbar of the guided rule templates designer to validate the guided rule template. If the rule template validation fails, address any problems described in the error message, review all components in the rule template and data defined in the data table, and try again to validate the rule template until the rule template passes. Click Save in the guided rule templates designer to save your work.
[ "rule \"PaymentRules_6\" when Customer( internetService == false , phoneService == false , TVService == true ) then RecurringPayment fact0 = new RecurringPayment(); fact0.setAmount( 5 ); insertLogical( fact0 ); end rule \"PaymentRules_5\" when Customer( internetService == false , phoneService == true , TVService == false ) then RecurringPayment fact0 = new RecurringPayment(); fact0.setAmount( 5 ); insertLogical( fact0 ); end //Other rules omitted for brevity." ]
https://docs.redhat.com/en/documentation/red_hat_decision_manager/7.13/html/developing_decision_services_in_red_hat_decision_manager/guided-rule-templates-tables-proc
Chapter 21. SELinux
Chapter 21. SELinux Security-Enhanced Linux , or SELinux , is a security architecture integrated into the current kernel using the linux security modules ( LSM ). It is a project of the United States National Security Agency (NSA) and the SELinux community. SELinux integration into Red Hat Enterprise Linux was a joint effort between the NSA and Red Hat. 21.1. Introduction to SELinux SELinux provides a flexible mandatory access control ( MAC ) system built into the Linux kernel. Under standard Linux discretionary access control ( DAC ), an application or process running as a user (UID or SUID) has the user's permissions to objects such as files, sockets, and other processes. Running an SELinux MAC kernel protects the system from malicious or flawed applications that can damage or destroy the system. SELinux defines the access and transition rights of every user, application, process, and file on the system. SELinux then governs the interactions of these subjects and objects using a security policy that specifies how strict or lenient a given Red Hat Enterprise Linux installation should be. For the most part, SELinux is almost completely invisible to system users. Only system administrators must worry about how strict a policy to implement for their server environment. The policy can be as strict or lenient as needed, and is very finely detailed. This detail gives the SELinux kernel complete, granular control over the entire system. When a subject such as an application attempts to access an object such as a file, the policy enforcement server in the kernel checks an access vector cache ( AVC ), where subject and object permissions are cached. If a decision cannot be made based on data in the AVC, the request continues to the security server, which looks up the security context of the application and the file in a matrix. Permission is then granted or denied, with an avc: denied message detailed in /var/log/messages . Subjects and objects gain their security context from installed policy, which also provides the information to populate the security server's matrix. In addition to running in an enforcing mode, SELinux can run in a permissive mode, where the AVC is checked and denials are logged, but SELinux does not enforce the policy. For more information about how SELinux works, refer to Section 21.3, " Additional Resources " .
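As a short practical aside (these commands are not part of the original chapter; they are standard SELinux utilities and assume the relevant SELinux utility packages are installed), the current mode and any logged denials can be checked from a shell:

getenforce                            # prints Enforcing, Permissive, or Disabled
sestatus                              # summarizes SELinux status and the loaded policy
grep 'avc.*denied' /var/log/messages  # lists AVC denial messages recorded by syslogd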
null
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/4/html/reference_guide/ch-selinux
Making open source more inclusive
Making open source more inclusive Red Hat is committed to replacing problematic language in our code, documentation, and web properties. We are beginning with these four terms: master, slave, blacklist, and whitelist. Because of the enormity of this endeavor, these changes will be implemented gradually over several upcoming releases. For more details, see our CTO Chris Wright's message .
null
https://docs.redhat.com/en/documentation/red_hat_openstack_platform/17.1/html/backing_up_and_restoring_the_undercloud_and_control_plane_nodes/making-open-source-more-inclusive
Chapter 9. Broker Clusters
Chapter 9. Broker Clusters You can connect brokers together to form a cluster. Broker clusters enable you to distribute message processing load and balance client connections. They also provide fault tolerance by increasing the number of brokers to which clients can connect. 9.1. Broker Clustering Changes In AMQ Broker 7, broker networks are called broker clusters. The brokers in the cluster are connected by cluster connections (which reference connector elements). Members of a cluster can be configured to discover each other dynamically (using UDP or JGroups), or statically (by manually specifying a list of cluster members). A cluster configuration is a required prerequisite for high-availability (HA). You must configure the cluster before you can configure HA, even if the cluster consists of only a single live broker. You can configure broker clusters in many different topologies, though symmetric and chain clusters are the most common. Regardless of the topology, you can scale clusters up and down without message loss (as long as you have configured the broker to send its messages to another broker in the cluster). Broker clusters distribute (and redistribute) messages differently than broker networks in AMQ 6. In AMQ 6, messages always arrived on a specific queue and were then pulled from one broker to another based on consumer interest. In AMQ Broker 7, queue definitions and consumers are shared across the cluster, and messages are routed across the cluster as they are received at the broker. Important Do not attempt to combine AMQ 6 brokers and AMQ Broker 7 brokers in the same cluster. 9.2. How Broker Clusters are Configured You configure a broker cluster by creating a broker instance for each member of the cluster, and then adding the cluster settings to each broker instance. Cluster settings consist of the following: Discovery groups For use with dynamic discovery, a discovery group defines how the broker instance discovers other members in the cluster. Discovery can use either UDP or JGroups. Broadcast groups For use with dynamic discovery, a broadcast group defines how the broker instance transmits cluster-related information to other members in the cluster. Broadcast can use either UDP or JGroups, but it must match its discovery groups counterpart. Cluster connections How the broker instance should connect to other members of the cluster. You can specify a discovery group or a static list of cluster members. You can also specify message redistribution and max hop properties. 9.2.1. Creating a Broker Cluster This procedure demonstrates how to create a basic, two-broker cluster with static discovery. Procedure Create the first broker instance by using the artemis create command. This example creates a new broker instance called broker1 . Create a second broker instance for the second member of the cluster. For each additional broker instance, you should use the --port-offset parameter to avoid port collisions with the broker instances. This example creates a second broker instance called broker2 . For the first broker instance, open the BROKER_INSTANCE_DIR /etc/broker.xml configuration file and add the cluster settings. For static discovery, you must add a connector and a static cluster connection. This example configures broker1 to connect to broker2 . 
<!-- Connectors --> <connectors> <connector name="netty-connector">tcp://localhost:61616</connector> <!-- connector to broker2 --> <connector name="broker2-connector">tcp://localhost:61617</connector> </connectors> <!-- Clustering configuration --> <cluster-connections> <cluster-connection name="my-cluster"> <connector-ref>netty-connector</connector-ref> <retry-interval>500</retry-interval> <use-duplicate-detection>true</use-duplicate-detection> <message-load-balancing>STRICT</message-load-balancing> <max-hops>1</max-hops> <static-connectors> <connector-ref>broker2-connector</connector-ref> </static-connectors> </cluster-connection> </cluster-connections> For the second broker instance, open the BROKER_INSTANCE_DIR /etc/broker.xml configuration file and add the cluster settings. This example configures broker2 to connect to broker1 . <!-- Connectors --> <connectors> <connector name="netty-connector">tcp://localhost:61617</connector> <!-- connector to broker1 --> <connector name="broker1-connector">tcp://localhost:61616</connector> </connectors> <!-- Clustering configuration --> <cluster-connections> <cluster-connection name="my-cluster"> <connector-ref>netty-connector</connector-ref> <retry-interval>500</retry-interval> <use-duplicate-detection>true</use-duplicate-detection> <message-load-balancing>STRICT</message-load-balancing> <max-hops>1</max-hops> <static-connectors> <connector-ref>broker1-connector</connector-ref> </static-connectors> </cluster-connection> </cluster-connections> Related Information For full details about creating broker clusters and configuring message redistribution and client load balancing, see Setting up a broker cluster in Configuring AMQ Broker . 9.2.2. Additional Broker Cluster Topologies Broker clusters can be connected in many different topologies. In AMQ Broker 7, symmetric and chain clusters are the most common. Example: Symmetric Cluster In a full mesh topology, each broker is connected to every other broker in the cluster. This means that every broker in the cluster is no more than one hop away from every other broker. This example uses dynamic discovery to enable the brokers in the cluster to discover each other. By setting max-hops to 1 , each broker will connect to every other broker: <!-- Clustering configuration --> <broadcast-groups> <broadcast-group name="my-broadcast-group"> <group-address>USD{udp-address:231.7.7.7}</group-address> <group-port>9876</group-port> <broadcast-period>100</broadcast-period> <connector-ref>netty-connector</connector-ref> </broadcast-group> </broadcast-groups> <discovery-groups> <discovery-group name="my-discovery-group"> <group-address>USD{udp-address:231.7.7.7}</group-address> <group-port>9876</group-port> <refresh-timeout>10000</refresh-timeout> </discovery-group> </discovery-groups> <cluster-connections> <cluster-connection name="my-cluster"> <connector-ref>netty-connector</connector-ref> <retry-interval>500</retry-interval> <use-duplicate-detection>true</use-duplicate-detection> <message-load-balancing>ON_DEMAND</message-load-balancing> <max-hops>1</max-hops> <discovery-group-ref discovery-group-name="my-discovery-group"/> </cluster-connection> </cluster-connections> Example: Chain Cluster In a chain cluster, the brokers form a linear "chain" with a broker on each end and all other brokers connecting to the and brokers in the chain (for example, A->B->C). This example uses static discovery to connect three brokers into a chain cluster. 
Each broker connects to the broker in the chain, and max-hops is set to 2 to enable messages to flow through the full chain. The first broker is configured like this: <connectors> <connector name="netty-connector">tcp://localhost:61616</connector> <!-- connector to broker2 --> <connector name="broker2-connector">tcp://localhost:61716</connector> </connectors> <cluster-connections> <cluster-connection name="my-cluster"> <address>jms</address> <connector-ref>netty-connector</connector-ref> <retry-interval>500</retry-interval> <use-duplicate-detection>true</use-duplicate-detection> <message-load-balancing>STRICT</message-load-balancing> <max-hops>2</max-hops> <static-connectors allow-direct-connections-only="true"> <connector-ref>broker2-connector</connector-ref> </static-connectors> </cluster-connection> </cluster-connections> The second broker is configured like this: <connectors> <connector name="netty-connector">tcp://localhost:61716</connector> <!-- connector to broker3 --> <connector name="broker3-connector">tcp://localhost:61816</connector> </connectors> <cluster-connections> <cluster-connection name="my-cluster"> <address>jms</address> <connector-ref>netty-connector</connector-ref> <retry-interval>500</retry-interval> <use-duplicate-detection>true</use-duplicate-detection> <message-load-balancing>STRICT</message-load-balancing> <max-hops>1</max-hops> <static-connectors allow-direct-connections-only="true"> <connector-ref>broker3-connector</connector-ref> </static-connectors> </cluster-connection> </cluster-connections> Finally, the third broker is configured like this: <connectors> <connector name="netty-connector">tcp://localhost:61816</connector> </connectors> <cluster-connections> <cluster-connection name="my-cluster"> <address>jms</address> <connector-ref>netty-connector</connector-ref> <retry-interval>500</retry-interval> <use-duplicate-detection>true</use-duplicate-detection> <message-load-balancing>STRICT</message-load-balancing> <max-hops>0</max-hops> </cluster-connection> </cluster-connections> 9.3. Broker Cluster Configuration Properties The following table compares the broker network configuration properties in AMQ 6 to the equivalent cluster-connection properties in AMQ Broker 7: To set... In AMQ 6 In AMQ Broker 7 Excluded destinations excludedDestinations No equivalent. The number of hops that a message can make through the cluster networkTTL The default is 1 , which means that a message can make just one hop to a neighboring broker. <max-hops> Sets this broker instance to load balance messages to brokers which might be connected to it indirectly with other brokers are intermediaries in a chain. The default is 1 , which means that messages are distributed only to other brokers directly connected to this broker instance. Replay messages when there are no consumers replayWhenNoConsumers No equivalent. However, you can set <redistribution-delay> to define the amount of time with no consumers (in milliseconds) after which messages should be redelivered as though arriving for the first time. Whether to broadcast advisory messages for temporary destinations in the cluster bridgeTempDestinations The default is true . This property was typically used for temporary destinations created for request-reply messages. This would enable consumers of these messages to be connected to another broker in the network and still be able to send the reply to the temporary destination specified in the JMSReplyTo header. No equivalent. In AMQ Broker 7, temporary destinations are never clustered. 
The credentials to use to authenticate this broker with a remote broker userName password <cluster-user> <cluster-password> Set the route priority for a connector decreaseNetworkConsumerPriority The default is false . If set to true , local consumers have a priority of 0 , and network subscriptions have a priority of -5 . In addition, the priority of a network subscription is reduced by 1 for every network hop that it traverses. No equivalent. Whether and how messages should be distributed between other brokers in the cluster No equivalent. <message-load-balancing> This can be set to OFF (no load balancing), STRICT (forward messages to all brokers in the cluster that have a matching queue), or ON_DEMAND (forward messages only to brokers in the cluster that have active consumers or a matching selector). The default is ON_DEMAND . Enable a cluster network connection to both produce and consume messages duplex By default, network connectors are unidirectional. However, you could set them to duplex to enable messages to flow in both directions. This was typically used for hub-and-spoke networks in which the hub was behind a firewall. No equivalent. Cluster connections are unidirectional only. However, you can configure a pair of cluster connections between each broker, one from each end. For more information about setting up a broker cluster, see Setting up a broker cluster in Configuring AMQ Broker .
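As a hedged illustration (the instance paths are examples and are not taken from this chapter), once both instances have been created and their broker.xml files edited as shown above, each cluster member can be started in the foreground from its own instance directory:

broker1/bin/artemis run   # start the first cluster member
broker2/bin/artemis run   # start the second cluster member, in a separate terminal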
[ "sudo INSTALL_DIR /bin/artemis create broker1 --user user --password pass --role amq", "sudo INSTALL_DIR /bin/artemis create broker2 --port-offset 100 --user user --password pass --role amq", "<!-- Connectors --> <connectors> <connector name=\"netty-connector\">tcp://localhost:61616</connector> <!-- connector to broker2 --> <connector name=\"broker2-connector\">tcp://localhost:61617</connector> </connectors> <!-- Clustering configuration --> <cluster-connections> <cluster-connection name=\"my-cluster\"> <connector-ref>netty-connector</connector-ref> <retry-interval>500</retry-interval> <use-duplicate-detection>true</use-duplicate-detection> <message-load-balancing>STRICT</message-load-balancing> <max-hops>1</max-hops> <static-connectors> <connector-ref>broker2-connector</connector-ref> </static-connectors> </cluster-connection> </cluster-connections>", "<!-- Connectors --> <connectors> <connector name=\"netty-connector\">tcp://localhost:61617</connector> <!-- connector to broker1 --> <connector name=\"broker1-connector\">tcp://localhost:61616</connector> </connectors> <!-- Clustering configuration --> <cluster-connections> <cluster-connection name=\"my-cluster\"> <connector-ref>netty-connector</connector-ref> <retry-interval>500</retry-interval> <use-duplicate-detection>true</use-duplicate-detection> <message-load-balancing>STRICT</message-load-balancing> <max-hops>1</max-hops> <static-connectors> <connector-ref>broker1-connector</connector-ref> </static-connectors> </cluster-connection> </cluster-connections>", "<!-- Clustering configuration --> <broadcast-groups> <broadcast-group name=\"my-broadcast-group\"> <group-address>USD{udp-address:231.7.7.7}</group-address> <group-port>9876</group-port> <broadcast-period>100</broadcast-period> <connector-ref>netty-connector</connector-ref> </broadcast-group> </broadcast-groups> <discovery-groups> <discovery-group name=\"my-discovery-group\"> <group-address>USD{udp-address:231.7.7.7}</group-address> <group-port>9876</group-port> <refresh-timeout>10000</refresh-timeout> </discovery-group> </discovery-groups> <cluster-connections> <cluster-connection name=\"my-cluster\"> <connector-ref>netty-connector</connector-ref> <retry-interval>500</retry-interval> <use-duplicate-detection>true</use-duplicate-detection> <message-load-balancing>ON_DEMAND</message-load-balancing> <max-hops>1</max-hops> <discovery-group-ref discovery-group-name=\"my-discovery-group\"/> </cluster-connection> </cluster-connections>", "<connectors> <connector name=\"netty-connector\">tcp://localhost:61616</connector> <!-- connector to broker2 --> <connector name=\"broker2-connector\">tcp://localhost:61716</connector> </connectors> <cluster-connections> <cluster-connection name=\"my-cluster\"> <address>jms</address> <connector-ref>netty-connector</connector-ref> <retry-interval>500</retry-interval> <use-duplicate-detection>true</use-duplicate-detection> <message-load-balancing>STRICT</message-load-balancing> <max-hops>2</max-hops> <static-connectors allow-direct-connections-only=\"true\"> <connector-ref>broker2-connector</connector-ref> </static-connectors> </cluster-connection> </cluster-connections>", "<connectors> <connector name=\"netty-connector\">tcp://localhost:61716</connector> <!-- connector to broker3 --> <connector name=\"broker3-connector\">tcp://localhost:61816</connector> </connectors> <cluster-connections> <cluster-connection name=\"my-cluster\"> <address>jms</address> <connector-ref>netty-connector</connector-ref> <retry-interval>500</retry-interval> 
<use-duplicate-detection>true</use-duplicate-detection> <message-load-balancing>STRICT</message-load-balancing> <max-hops>1</max-hops> <static-connectors allow-direct-connections-only=\"true\"> <connector-ref>broker3-connector</connector-ref> </static-connectors> </cluster-connection> </cluster-connections>", "<connectors> <connector name=\"netty-connector\">tcp://localhost:61816</connector> </connectors> <cluster-connections> <cluster-connection name=\"my-cluster\"> <address>jms</address> <connector-ref>netty-connector</connector-ref> <retry-interval>500</retry-interval> <use-duplicate-detection>true</use-duplicate-detection> <message-load-balancing>STRICT</message-load-balancing> <max-hops>0</max-hops> </cluster-connection> </cluster-connections>" ]
https://docs.redhat.com/en/documentation/red_hat_amq/2021.q2/html/migrating_to_red_hat_amq_7/broker_clusters
function::usymfileline
function::usymfileline Name function::usymfileline - Return the file name and line number of an address. Synopsis Arguments addr The address to translate. Description Returns the file name and the (approximate) line number of the given address, if known. If the file name or the line number cannot be found, the hex string representation of the address will be returned.
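A minimal usage sketch follows (illustrative only: it assumes debuginfo for the probed binary is available, and some SystemTap releases may additionally need the -d option to preload symbol tables); it prints the file and line of the current user-space address when the target process starts:

stap -e 'probe process("/bin/ls").begin { println(usymfileline(uaddr())) }' -c /bin/ls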
[ "usymfileline:string(addr:long)" ]
https://docs.redhat.com/en/documentation/Red_Hat_Enterprise_Linux/7/html/systemtap_tapset_reference/api-usymfileline
2.6. TCP Wrappers and xinetd
2.6. TCP Wrappers and xinetd Controlling access to network services is one of the most important security tasks facing a server administrator. Red Hat Enterprise Linux provides several tools for this purpose. For example, an iptables -based firewall filters out unwelcome network packets within the kernel's network stack. For network services that utilize it, TCP Wrappers add an additional layer of protection by defining which hosts are or are not allowed to connect to " wrapped " network services. One such wrapped network service is the xinetd super server . This service is called a super server because it controls connections to a subset of network services and further refines access control. Figure 2.4, "Access Control to Network Services" is a basic illustration of how these tools work together to protect network services. Figure 2.4. Access Control to Network Services For more information about using firewalls with iptables , see Section 2.8.9, "IPTables" . 2.6.1. TCP Wrappers The TCP Wrappers packages ( tcp_wrappers and tcp_wrappers-libs ) are installed by default and provide host-based access control to network services. The most important component within the package is the /lib/libwrap.so or /lib64/libwrap.so library. In general terms, a TCP-wrapped service is one that has been compiled against the libwrap.so library. When a connection attempt is made to a TCP-wrapped service, the service first references the host's access files ( /etc/hosts.allow and /etc/hosts.deny ) to determine whether or not the client is allowed to connect. In most cases, it then uses the syslog daemon ( syslogd ) to write the name of the requesting client and the requested service to /var/log/secure or /var/log/messages . If a client is allowed to connect, TCP Wrappers release control of the connection to the requested service and take no further part in the communication between the client and the server. In addition to access control and logging, TCP Wrappers can execute commands to interact with the client before denying or releasing control of the connection to the requested network service. Because TCP Wrappers are a valuable addition to any server administrator's arsenal of security tools, most network services within Red Hat Enterprise Linux are linked to the libwrap.so library. Such applications include /usr/sbin/sshd , /usr/sbin/sendmail , and /usr/sbin/xinetd . Note To determine if a network service binary is linked to libwrap.so , type the following command as the root user: ldd <binary-name> | grep libwrap Replace <binary-name> with the name of the network service binary. If the command returns straight to the prompt with no output, then the network service is not linked to libwrap.so . The following example indicates that /usr/sbin/sshd is linked to libwrap.so : 2.6.1.1. Advantages of TCP Wrappers TCP Wrappers provide the following advantages over other network service control techniques: Transparency to both the client and the wrapped network service - Both the connecting client and the wrapped network service are unaware that TCP Wrappers are in use. Legitimate users are logged and connected to the requested service while connections from banned clients fail. Centralized management of multiple protocols - TCP Wrappers operate separately from the network services they protect, allowing many server applications to share a common set of access control configuration files, making for simpler management.
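As a hypothetical illustration (the service and network prefix are placeholder values, not recommendations from this guide), access rules for wrapped services are kept in /etc/hosts.allow and /etc/hosts.deny:

~]# cat /etc/hosts.allow
sshd : 192.168.0.
~]# cat /etc/hosts.deny
ALL : ALL

With these entries, connections to sshd are accepted only from hosts whose addresses begin with 192.168.0., and all other wrapped services reject every client; hosts.allow is always evaluated before hosts.deny, and the first matching rule wins.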
[ "~]# ldd /usr/sbin/sshd | grep libwrap libwrap.so.0 => /lib/libwrap.so.0 (0x00655000)" ]
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/security_guide/sect-security_guide-tcp_wrappers_and_xinetd
5.6. Modifying and Deleting Fencing Devices
5.6. Modifying and Deleting Fencing Devices Use the following command to modify or add options to a currently configured fencing device. Use the following command to remove a fencing device from the current configuration.
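For example (the device name myapc and the host list are hypothetical, and the options a device accepts depend on its fence agent), you might restrict which nodes an existing device can fence and later remove the device entirely:

pcs stonith update myapc pcmk_host_list="node1.example.com node2.example.com"
pcs stonith delete myapc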
[ "pcs stonith update stonith_id [ stonith_device_options ]", "pcs stonith delete stonith_id" ]
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/7/html/high_availability_add-on_reference/s1-fencedevicemodify-haar
2.4. Authentication
2.4. Authentication Support for central management of SSH keys, BZ# 803822 Previously, it was not possible to centrally manage host and user SSH public keys. Red Hat Enterprise Linux 6.3 includes SSH public key management for Identity Management servers as a Technology Preview. OpenSSH on Identity Management clients is automatically configured to use public keys which are stored on the Identity Management server. SSH host and user identities can now be managed centrally in Identity Management. Package: sssd-1.8.0-32 SELinux user mapping, BZ# 803821 Red Hat Enterprise Linux 6.3 introduces the ability to control the SELinux context of a user on a remote system. SELinux user map rules can be defined and, optionally, associated with HBAC rules. These maps define the context a user receives depending on the host they are logging into and the group membership. When a user logs into a remote host which is configured to use SSSD with the Identity Management backend, the user's SELinux context is automatically set according to mapping rules defined for that user. For more information, refer to http://freeipa.org/page/SELinux_user_mapping . This feature is considered a Technology Preview. Package: sssd-1.8.0-32 SSSD support for automount map caching, BZ# 761570 In Red Hat Enterprise Linux 6.3, SSSD includes a new Technology Preview feature: support for caching automount maps. This feature provides several advantages to environments that operate with autofs : Cached automount maps make it easy for a client machine to perform mount operations even when the LDAP server is unreachable, but the NFS server remains reachable. When the autofs daemon is configured to look up automount maps via SSSD, only a single file has to be configured: /etc/sssd/sssd.conf . Previously, the /etc/sysconfig/autofs file had to be configured to fetch autofs data. Caching the automount maps results in faster performance on the client and lower traffic on the LDAP server. Package: sssd-1.8.0-32
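As an illustrative sketch only (the domain name, LDAP URI, and search base are placeholders, and exact option support may vary in the sssd-1.8.0 Technology Preview), caching automount maps through SSSD amounts to enabling the autofs responder and an autofs provider in /etc/sssd/sssd.conf:

[sssd]
services = nss, pam, autofs
domains = example.com

[domain/example.com]
id_provider = ldap
autofs_provider = ldap
ldap_uri = ldap://ldap.example.com
ldap_autofs_search_base = ou=automount,dc=example,dc=com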
null
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/6.3_technical_notes/authentication_tp
Chapter 1. Preparing your Environment for Installation
Chapter 1. Preparing your Environment for Installation Before you install Satellite, ensure that your environment meets the following requirements. 1.1. System Requirements The following requirements apply to the networked base operating system: x86_64 architecture The latest version of Red Hat Enterprise Linux 8 or Red Hat Enterprise Linux 7 Server 4-core 2.0 GHz CPU at a minimum A minimum of 20 GB RAM is required for Satellite Server to function. In addition, a minimum of 4 GB RAM of swap space is also recommended. Satellite running with less RAM than the minimum value might not operate correctly. A unique host name, which can contain lower-case letters, numbers, dots (.) and hyphens (-) A current Red Hat Satellite subscription Administrative user (root) access A system umask of 0022 Full forward and reverse DNS resolution using a fully-qualified domain name Satellite only supports UTF-8 encoding. If your territory is USA and your language is English, set en_US.utf-8 as the system-wide locale settings. For more information about configuring system locale in Red Hat Enterprise Linux, see Configuring System Locale guide . Your Satellite must have the Red Hat Satellite Infrastructure Subscription manifest in your Customer Portal. Satellite must have satellite-capsule-6.x repository enabled and synced. To create, manage, and export a Red Hat Subscription Manifest in the Customer Portal, see Creating and managing manifests for a connected Satellite Server in Subscription Central . Satellite Server and Capsule Server do not support shortnames in the hostnames. When using custom certificates, the Common Name (CN) of the custom certificate must be a fully qualified domain name (FQDN) instead of a shortname. This does not apply to the clients of a Satellite. Before you install Satellite Server, ensure that your environment meets the requirements for installation. Satellite Server must be installed on a freshly provisioned system that serves no other function except to run Satellite Server. The freshly provisioned system must not have the following users provided by external identity providers to avoid conflicts with the local users that Satellite Server creates: apache foreman foreman-proxy postgres pulp puppet qdrouterd qpidd redis tomcat Certified hypervisors Satellite Server is fully supported on both physical systems and virtual machines that run on hypervisors that are supported to run Red Hat Enterprise Linux. For more information about certified hypervisors, see Certified Guest Operating Systems in Red Hat OpenStack Platform, Red Hat Virtualization, Red Hat OpenShift Virtualization and Red Hat Enterprise Linux with KVM . SELinux Mode SELinux must be enabled, either in enforcing or permissive mode. Installation with disabled SELinux is not supported. FIPS Mode You can install Satellite on a Red Hat Enterprise Linux system that is operating in FIPS mode. You cannot enable FIPS mode after the installation of Satellite. For more information, see Installing a RHEL 8 system with FIPS mode enabled in the Red Hat Enterprise Linux Security Hardening Guide . For more information about FIPS on Red Hat Enterprise Linux 7 systems, see Enabling FIPS Mode in the Red Hat Enterprise Linux Security Guide . Note Satellite supports DEFAULT and FIPS crypto-policies. The FUTURE crypto-policy is not supported for Satellite and Capsule installations. 
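Before starting the installation, the requirements above can be checked quickly from a shell; this is an informal pre-flight sketch, not part of the documented procedure:

umask              # expect 0022
localectl status   # system-wide locale, for example en_US.utf-8
hostname -f        # must print a unique fully qualified domain name, not a shortname
nproc              # at least 4 CPU cores
free -g            # at least 20 GB RAM, plus swap space
getenforce         # Enforcing or Permissive; installation with SELinux disabled is not supported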
Inter-Satellite Synchronization (ISS) In a scenario with air-gapped Satellite Servers, all your Satellite Servers must be on the same Satellite version for ISS Export Sync to work. ISS Network Sync works across all Satellite versions that support it. For more information, see Synchronizing Content Between Satellite Servers in Managing Content . 1.2. Storage Requirements Red Hat Enterprise Linux 8 Red Hat Enterprise Linux 7 The following table details storage requirements for specific directories. These values are based on expected use case scenarios and can vary according to individual environments. The runtime size was measured with Red Hat Enterprise Linux 6, 7, and 8 repositories synchronized. 1.2.1. Red Hat Enterprise Linux 8 Table 1.1. Storage Requirements for a Satellite Server Installation Directory Installation Size Runtime Size /var/log 10 MB 10 GB /var/lib/pgsql 100 MB 20 GB /usr 5 GB Not Applicable /opt/puppetlabs 500 MB Not Applicable /var/lib/pulp 1 MB 300 GB /var/lib/qpidd 25 MB Refer Storage Guidelines For external database servers: /var/lib/pgsql with installation size of 100 MB and runtime size of 20 GB. For detailed information on partitioning and size, refer to the Red Hat Enterprise Linux 8 partitioning guide . 1.2.2. Red Hat Enterprise Linux 7 Table 1.2. Storage Requirements for a Satellite Server Installation Directory Installation Size Runtime Size /var/log 10 MB 10 GB /var/opt/rh/rh-postgresql12 100 MB 20 GB /usr 3 GB Not Applicable /opt 3 GB Not Applicable /opt/puppetlabs 500 MB Not Applicable /var/lib/pulp 1 MB 300 GB /var/lib/qpidd 25 MB Refer Storage Guidelines For external database servers: /var/lib/pgsql with installation size of 100 MB and runtime size of 20 GB. 1.3. Storage Guidelines Consider the following guidelines when installing Satellite Server to increase efficiency. If you mount the /tmp directory as a separate file system, you must use the exec mount option in the /etc/fstab file. If /tmp is already mounted with the noexec option, you must change the option to exec and re-mount the file system. This is a requirement for the puppetserver service to work. Because most Satellite Server data is stored in the /var directory, mounting /var on LVM storage can help the system to scale. The /var/lib/qpidd/ directory uses slightly more than 2 MB per Content Host managed by the goferd service. For example, 10 000 Content Hosts require 20 GB of disk space in /var/lib/qpidd/ . Use high-bandwidth, low-latency storage for the /var/lib/pulp/ directories. As Red Hat Satellite has many operations that are I/O intensive, using high latency, low-bandwidth storage causes performance degradation. Ensure your installation has a speed in the range 60 - 80 Megabytes per second. You can use the storage-benchmark script to get this data. For more information on using the storage-benchmark script, see Impact of Disk Speed on Satellite Operations . File System Guidelines Do not use the GFS2 file system as the input-output latency is too high. Log File Storage Log files are written to /var/log/messages/, /var/log/httpd/ , and /var/lib/foreman-proxy/openscap/content/ . You can manage the size of these files using logrotate . For more information, see Log Rotation in the Red Hat Enterprise Linux 7 System Administrator's Guide . The exact amount of storage you require for log messages depends on your installation and setup. SELinux Considerations for NFS Mount When the /var/lib/pulp directory is mounted using an NFS share, SELinux blocks the synchronization process. 
To avoid this, specify the SELinux context of the /var/lib/pulp directory in the file system table by adding the following lines to /etc/fstab : If the NFS share is already mounted, remount it using the above configuration and enter the following command: Duplicated Packages Packages that are duplicated in different repositories are only stored once on the disk. Additional repositories containing duplicate packages require less additional storage. The bulk of storage resides in the /var/lib/pulp/ directory. These end points are not manually configurable. Ensure that storage is available on the /var file system to prevent storage problems. Software Collections Software collections are installed in the /opt/rh/ and /opt/theforeman/ directories. Write and execute permissions by the root user are required for installation to the /opt directory. Symbolic links You cannot use symbolic links for /var/lib/pulp/ . 1.4. Supported Operating Systems You can install the operating system from a disc, local ISO image, kickstart, or any other method that Red Hat supports. Red Hat Satellite Server is supported on the latest versions of Red Hat Enterprise Linux 8, and Red Hat Enterprise Linux 7 Server that are available at the time when Satellite Server is installed. Previous versions of Red Hat Enterprise Linux including EUS or z-stream are not supported. The following operating systems are supported by the installer, have packages, and are tested for deploying Satellite: Table 1.3. Operating Systems supported by satellite-installer Operating System Architecture Notes Red Hat Enterprise Linux 8 x86_64 only Red Hat Enterprise Linux 7 x86_64 only Before you install Satellite, apply all operating system updates if possible. Red Hat Satellite Server requires a Red Hat Enterprise Linux installation with the @Base package group with no other package-set modifications, and without third-party configurations or software not directly necessary for the direct operation of the server. This restriction includes hardening and other non-Red Hat security software. If you require such software in your infrastructure, install and verify a complete working Satellite Server first, then create a backup of the system before adding any non-Red Hat software. Install Satellite Server on a freshly provisioned system. Red Hat does not support using the system for anything other than running Satellite Server. 1.5. Supported Browsers Satellite supports recent versions of Firefox and Google Chrome browsers. The Satellite web UI and command-line interface support English, Portuguese, Simplified Chinese, Traditional Chinese, Korean, Japanese, Italian, Spanish, Russian, French, and German. 1.6. Ports and Firewalls Requirements For the components of Satellite architecture to communicate, ensure that the required network ports are open and free on the base operating system. You must also ensure that the required network ports are open on any network-based firewalls. Use this information to configure any network-based firewalls. Note that some cloud solutions must be specifically configured to allow communications between machines because they isolate machines similarly to network-based firewalls. If you use an application-based firewall, ensure that the application-based firewall permits all applications that are listed in the tables and known to your firewall. If possible, disable the application checking and allow open port communication based on the protocol.
Integrated Capsule Satellite Server has an integrated Capsule and any host that is directly connected to Satellite Server is a Client of Satellite in the context of this section. This includes the base operating system on which Capsule Server is running. Clients of Capsule Hosts which are clients of Capsules, other than Satellite's integrated Capsule, do not need access to Satellite Server. For more information on Satellite Topology and an illustration of port connections, see Capsule Networking in Planning for Red Hat Satellite . Required ports can change based on your configuration. The following tables indicate the destination port and the direction of network traffic: Table 1.4. Satellite Server incoming traffic Destination Port Protocol Service Source Required For Description 53 TCP and UDP DNS DNS Servers and clients Name resolution DNS (optional) 67 UDP DHCP Client Dynamic IP DHCP (optional) 69 UDP TFTP Client TFTP Server (optional) 443 TCP HTTPS Capsule Red Hat Satellite API Communication from Capsule 443, 80 TCP HTTPS, HTTP Client Content Retrieval Content 443, 80 TCP HTTPS, HTTP Capsule Content Retrieval Content 443, 80 TCP HTTPS, HTTP Client Content Host Registration Capsule CA RPM installation 443 TCP HTTPS Client Content Host registration Initiation Uploading facts Sending installed packages and traces 443 TCP HTTPS Red Hat Satellite Content Mirroring Management 443 TCP HTTPS Red Hat Satellite Capsule API Smart Proxy functionality 5646 TCP AMQP Capsule Katello agent Forward message to Qpid dispatch router on Satellite (optional) 5910 - 5930 TCP HTTPS Browsers Compute Resource's virtual console 8000 TCP HTTP Client Provisioning templates Template retrieval for client installers, iPXE or UEFI HTTP Boot 8000 TCP HTTPS Client PXE Boot Installation 8140 TCP HTTPS Client Puppet agent Client updates (optional) 9090 TCP HTTPS Client OpenSCAP Configure Client 9090 TCP HTTPS Discovered Node Discovery Host discovery and provisioning 9090 TCP HTTPS Red Hat Satellite Capsule API Capsule functionality Any managed host that is directly connected to Satellite Server is a client in this context because it is a client of the integrated Capsule. This includes the base operating system on which a Capsule Server is running. A DHCP Capsule performs ICMP ping or TCP echo connection attempts to hosts in subnets with DHCP IPAM set to find out if an IP address considered for use is free. This behavior can be turned off using satellite-installer --foreman-proxy-dhcp-ping-free-ip=false . Note Some outgoing traffic returns to Satellite to enable internal communication and security operations. Table 1.5. Satellite Server outgoing traffic Destination Port Protocol Service Destination Required For Description ICMP ping Client DHCP Free IP checking (optional) 7 TCP echo Client DHCP Free IP checking (optional) 22 TCP SSH Target host Remote execution Run jobs 22, 16514 TCP SSH SSH/TLS Compute Resource Satellite originated communications, for compute resources in libvirt 53 TCP and UDP DNS DNS Servers on the Internet DNS Server Resolve DNS records (optional) 53 TCP and UDP DNS DNS Server Capsule DNS Validation of DNS conflicts (optional) 53 TCP and UDP DNS DNS Server Orchestration Validation of DNS conflicts 68 UDP DHCP Client Dynamic IP DHCP (optional) 80 TCP HTTP Remote repository Content Sync Remote yum repository 389, 636 TCP LDAP, LDAPS External LDAP Server LDAP LDAP authentication, necessary only if external authentication is enabled. 
The port can be customized when LDAPAuthSource is defined 443 TCP HTTPS Satellite Capsule Capsule Configuration management Template retrieval OpenSCAP Remote Execution result upload 443 TCP HTTPS Amazon EC2, Azure, Google GCE Compute resources Virtual machine interactions (query/create/destroy) (optional) 443 TCP HTTPS Capsule Content mirroring Initiation 443 TCP HTTPS Infoblox DHCP Server DHCP management When using Infoblox for DHCP, management of the DHCP leases (optional) 623 Client Power management BMC On/Off/Cycle/Status 5000 TCP HTTPS OpenStack Compute Resource Compute resources Virtual machine interactions (query/create/destroy) (optional) 5646 TCP AMQP Satellite Server Katello agent Forward message to Qpid dispatch router on Capsule (optional) 5671 Qpid Remote install Send install command to client 5671 Dispatch router (hub) Remote install Forward message to dispatch router on Satellite 5671 Satellite Server Remote install for Katello agent Send install command to client 5671 Satellite Server Remote install for Katello agent Forward message to dispatch router on Satellite 5900 - 5930 TCP SSL/TLS Hypervisor noVNC console Launch noVNC console 7911 TCP DHCP, OMAPI DHCP Server DHCP The DHCP target is configured using --foreman-proxy-dhcp-server and defaults to localhost ISC and remote_isc use a configurable port that defaults to 7911 and uses OMAPI 8443 TCP HTTPS Client Discovery Capsule sends reboot command to the discovered host (optional) 9090 TCP HTTPS Capsule Capsule API Management of Capsules 1.7. Enabling Connections from a Client to Satellite Server Capsules and Content Hosts that are clients of a Satellite Server's internal Capsule require access through Satellite's host-based firewall and any network-based firewalls. Use this procedure to configure the host-based firewall on the system that Satellite is installed on, to enable incoming connections from Clients, and to make the configuration persistent across system reboots. For more information on the ports used, see Ports and Firewalls Requirements . Procedure To open the ports for client to Satellite communication, enter the following command on the base operating system that you want to install Satellite on: Make the changes persistent: Verification Enter the following command: For more information, see Using and Configuring firewalld in the Red Hat Enterprise Linux 8 Security Guide , and Getting Started with firewalld in the Red Hat Enterprise Linux 7 Security Guide . 1.8. Verifying DNS resolution Verify the full forward and reverse DNS resolution using a fully-qualified domain name to prevent issues while installing Satellite. Procedure Ensure that the host name and local host resolve correctly: Successful name resolution results in output similar to the following: To avoid discrepancies with static and transient host names, set all the host names on the system by entering the following command: For more information, see the Configuring Host Names Using hostnamectl in the Red Hat Enterprise Linux 7 Networking Guide . Warning Name resolution is critical to the operation of Satellite. If Satellite cannot properly resolve its fully qualified domain name, tasks such as content management, subscription management, and provisioning will fail. 1.9. Tuning Satellite Server with Predefined Profiles If your Satellite deployment includes more than 5000 hosts, you can use predefined tuning profiles to improve performance of Satellite. Note that you cannot use tuning profiles on Capsules. 
You can choose one of the profiles depending on the number of hosts your Satellite manages and available hardware resources. The tuning profiles are available in the /usr/share/foreman-installer/config/foreman.hiera/tuning/sizes directory. When you run the satellite-installer command with the --tuning option, deployment configuration settings are applied to Satellite in the following order: The default tuning profile defined in the /usr/share/foreman-installer/config/foreman.hiera/tuning/common.yaml file The tuning profile that you want to apply to your deployment and is defined in the /usr/share/foreman-installer/config/foreman.hiera/tuning/sizes/ directory Optional: If you have configured a /etc/foreman-installer/custom-hiera.yaml file, Satellite applies these configuration settings. Note that the configuration settings that are defined in the /etc/foreman-installer/custom-hiera.yaml file override the configuration settings that are defined in the tuning profiles. Therefore, before applying a tuning profile, you must compare the configuration settings that are defined in the default tuning profile in /usr/share/foreman-installer/config/foreman.hiera/tuning/common.yaml , the tuning profile that you want to apply and your /etc/foreman-installer/custom-hiera.yaml file, and remove any duplicated configuration from the /etc/foreman-installer/custom-hiera.yaml file. default Number of managed hosts: 0 - 5000 RAM: 20G Number of CPU cores: 4 medium Number of managed hosts: 5001 - 10000 RAM: 32G Number of CPU cores: 8 large Number of managed hosts: 10001 - 20000 RAM: 64G Number of CPU cores: 16 extra-large Number of managed hosts: 20001 - 60000 RAM: 128G Number of CPU cores: 32 extra-extra-large Number of managed hosts: 60000+ RAM: 256G Number of CPU cores: 48+ Procedure Optional: If you have configured the custom-hiera.yaml file on Satellite Server, back up the /etc/foreman-installer/custom-hiera.yaml file to custom-hiera.original . You can use the backup file to restore the /etc/foreman-installer/custom-hiera.yaml file to its original state if it becomes corrupted: Optional: If you have configured the custom-hiera.yaml file on Satellite Server, review the definitions of the default tuning profile in /usr/share/foreman-installer/config/foreman.hiera/tuning/common.yaml and the tuning profile that you want to apply in /usr/share/foreman-installer/config/foreman.hiera/tuning/sizes/ . Compare the configuration entries against the entries in your /etc/foreman-installer/custom-hiera.yaml file and remove any duplicated configuration settings in your /etc/foreman-installer/custom-hiera.yaml file. Enter the satellite-installer command with the --tuning option for the profile that you want to apply. For example, to apply the medium tuning profile settings, enter the following command:
[ "nfs.example.com:/nfsshare /var/lib/pulp nfs context=\"system_u:object_r:var_lib_t:s0\" 1 2", "restorecon -R /var/lib/pulp", "firewall-cmd --add-port=\"53/udp\" --add-port=\"53/tcp\" --add-port=\"67/udp\" --add-port=\"69/udp\" --add-port=\"80/tcp\" --add-port=\"443/tcp\" --add-port=\"5647/tcp\" --add-port=\"8000/tcp\" --add-port=\"9090/tcp\" --add-port=\"8140/tcp\"", "firewall-cmd --runtime-to-permanent", "firewall-cmd --list-all", "ping -c1 localhost ping -c1 `hostname -f` # my_system.domain.com", "ping -c1 localhost PING localhost (127.0.0.1) 56(84) bytes of data. 64 bytes from localhost (127.0.0.1): icmp_seq=1 ttl=64 time=0.043 ms --- localhost ping statistics --- 1 packets transmitted, 1 received, 0% packet loss, time 0ms rtt min/avg/max/mdev = 0.043/0.043/0.043/0.000 ms ping -c1 `hostname -f` PING hostname.gateway (XX.XX.XX.XX) 56(84) bytes of data. 64 bytes from hostname.gateway (XX.XX.XX.XX): icmp_seq=1 ttl=64 time=0.019 ms --- localhost.gateway ping statistics --- 1 packets transmitted, 1 received, 0% packet loss, time 0ms rtt min/avg/max/mdev = 0.019/0.019/0.019/0.000 ms", "hostnamectl set-hostname name", "cp /etc/foreman-installer/custom-hiera.yaml /etc/foreman-installer/custom-hiera.original", "satellite-installer --tuning medium" ]
https://docs.redhat.com/en/documentation/red_hat_satellite/6.11/html/installing_satellite_server_in_a_disconnected_network_environment/preparing_your_environment_for_installation_satellite
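The tuning-profile comparison described above can be scripted. The following is a minimal sketch, not taken from the Satellite documentation: it lists keys that appear both in a tuning profile and in /etc/foreman-installer/custom-hiera.yaml before the profile is applied. The medium.yaml file name and the simple top-level "key:" extraction are assumptions about the YAML layout.
# Minimal sketch: back up custom-hiera.yaml, then list keys defined in both files.
TUNING=/usr/share/foreman-installer/config/foreman.hiera/tuning/sizes/medium.yaml   # assumed file name
CUSTOM=/etc/foreman-installer/custom-hiera.yaml
cp "$CUSTOM" /etc/foreman-installer/custom-hiera.original
comm -12 <(grep -E '^[^#[:space:]].*:' "$TUNING" | cut -d: -f1 | sort) \
         <(grep -E '^[^#[:space:]].*:' "$CUSTOM" | cut -d: -f1 | sort)
# Remove every key printed above from custom-hiera.yaml, then apply the profile:
satellite-installer --tuning medium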
3.5. Setting Cgroup Parameters
3.5. Setting Cgroup Parameters Modify the parameters of the control groups by editing the /etc/cgconfig.conf configuration file, or by using the cgset command. Changes made to /etc/cgconfig.conf are preserved after reboot, while cgset changes the cgroup parameters only for the current session. Modifying /etc/cgconfig.conf You can set the controller parameters in the Groups section of /etc/cgconfig.conf . Group entries are defined using the following syntax: Replace name with the name of your cgroup; controller stands for the name of the controller you wish to modify. You should modify only controllers you mounted yourself, not any of the default controllers mounted automatically by systemd . Replace param_name and param_value with the controller parameter you wish to change and its new value. Note that the permissions section is optional. To define permissions for a group entry, use the following syntax: Note Restart the cgconfig service for the changes in the /etc/cgconfig.conf to take effect. Restarting this service rebuilds hierarchies specified in the configuration file but does not affect all mounted hierarchies. You can restart a service by executing the systemctl restart command; however, it is recommended to first stop the cgconfig service: Then open and edit the configuration file. After saving your changes, you can start cgconfig again with the following command: Using the cgset Command Set controller parameters by running the cgset command from a user account with permission to modify the relevant cgroup. Use this only for controllers you mounted manually. The syntax for cgset is: where: parameter is the parameter to be set, which corresponds to the file in the directory of the given cgroup; value is the value for the parameter; path_to_cgroup is the path to the cgroup relative to the root of the hierarchy . The values that can be set with cgset might depend on values set higher in a particular hierarchy. For example, if group1 is limited to use only CPU 0 on a system, you cannot set group1/subgroup1 to use CPUs 0 and 1, or to use only CPU 1. It is also possible to use cgset to copy the parameters of one cgroup into another, existing cgroup. The syntax to copy parameters with cgset is: where: path_to_source_cgroup is the path to the cgroup whose parameters are to be copied, relative to the root group of the hierarchy; path_to_target_cgroup is the path to the destination cgroup, relative to the root group of the hierarchy.
[ "group name { [ permissions ] controller { param_name = param_value ; ... } ... }", "perm { task { uid = task_user ; gid = task_group ; } admin { uid = admin_name ; gid = admin_group ; } }", "~]# systemctl stop cgconfig", "~]# systemctl start cgconfig", "cgset -r parameter = value path_to_cgroup", "cgset --copy-from path_to_source_cgroup path_to_target_cgroup" ]
https://docs.redhat.com/en/documentation/Red_Hat_Enterprise_Linux/7/html/resource_management_guide/sec-Setting_Cgroup_Parameters
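As a concrete illustration of the syntax described above, the sketch below persists a CPU limit for a hypothetical cgroup named group1 and then sets the same parameter for the current session with cgset. The group name, the subgroup, and the cpu.shares value are examples only, and the cpu controller is assumed to be one you mounted manually rather than one managed by systemd.
# Persistent: append a group entry to /etc/cgconfig.conf while cgconfig is stopped.
systemctl stop cgconfig
cat >> /etc/cgconfig.conf << 'EOF'
group group1 {
    cpu {
        cpu.shares = 512;
    }
}
EOF
systemctl start cgconfig
# Session-only: set the same parameter with cgset, then copy group1's parameters
# into an already existing subgroup.
cgset -r cpu.shares=512 group1
cgset --copy-from group1 group1/subgroup1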
A.12. KVM Networking Performance
A.12. KVM Networking Performance By default, KVM virtual machines are assigned a virtual Realtek 8139 (rtl8139) NIC (network interface controller). The rtl8139 virtualized NIC works fine in most environments, but this device can suffer from performance degradation problems on some networks, such as a 10 Gigabit Ethernet network. To improve performance, you can switch to the paravirtualized network driver. Note Note that the virtualized Intel PRO/1000 ( e1000 ) driver is also supported as an emulated driver choice. To use the e1000 driver, replace virtio in the procedure below with e1000 . For the best performance, it is recommended to use the virtio driver. Procedure A.6. Switching to the virtio driver Shut down the guest operating system. Edit the guest's configuration file with the virsh command (where GUEST is the guest's name): The virsh edit command uses the USDEDITOR shell variable to determine which editor to use. Find the network interface section of the configuration. This section resembles the snippet below: Change the type attribute of the model element from 'rtl8139' to 'virtio' . This will change the driver from the rtl8139 driver to the virtio driver. Save the changes and exit the text editor. Restart the guest operating system. Creating New Guests Using Other Network Drivers Alternatively, new guests can be created with a different network driver. This may be required if you are having difficulty installing guests over a network connection. This method requires you to have at least one guest already created (possibly installed from CD or DVD) to use as a template. Create an XML template from an existing guest (in this example, named Guest1 ): Copy and edit the XML file and update the unique fields: virtual machine name, UUID, disk image, MAC address, and any other unique parameters. Note that you can delete the UUID and MAC address lines, and virsh will generate a new UUID and MAC address. Add the model line in the network interface section: Create the new virtual machine:
[ "virsh edit GUEST", "<interface type='network'> [output truncated] <model type='rtl8139' /> </interface>", "<interface type='network'> [output truncated] <model type= 'virtio' /> </interface>", "virsh dumpxml Guest1 > /tmp/ guest-template .xml", "cp /tmp/ guest-template .xml /tmp/ new-guest .xml vi /tmp/ new-guest .xml", "<interface type='network'> [output truncated] <model type='virtio' /> </interface>", "virsh define /tmp/new-guest.xml virsh start new-guest" ]
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/7/html/virtualization_deployment_and_administration_guide/sect-troubleshooting-kvm_networking_performance
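A quick way to confirm the switch, not part of the procedure above, is to check the interface model from the host and the bound driver from inside the guest. The guest name Guest1 and the interface name eth0 are placeholders.
# Host side: the interface section should now show <model type='virtio'/>.
virsh dumpxml Guest1 | grep -A2 "<interface"
# Guest side: a virtio NIC is handled by the virtio_net kernel driver.
ethtool -i eth0 | grep ^driver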
4.339. virt-v2v
4.339. virt-v2v 4.339.1. RHSA-2011:1615 - Low: virt-v2v security and bug fix update An updated virt-v2v package that fixes one security issue and several bugs is now available for Red Hat Enterprise Linux 6. The Red Hat Security Response Team has rated this update as having low security impact. A Common Vulnerability Scoring System (CVSS) base score, which gives a detailed severity rating, is available for each vulnerability from the CVE link(s) associated with each description below. virt-v2v is a tool for converting and importing virtual machines to libvirt-managed KVM (Kernel-based Virtual Machine), or Red Hat Enterprise Virtualization. Security Fix CVE-2011-1773 Using virt-v2v to convert a guest that has a password-protected VNC console to a KVM guest removed that password protection from the converted guest: after conversion, a password was not required to access the converted guest's VNC console. Now, converted guests will require the same VNC console password as the original guest. Note that when converting a guest to run on Red Hat Enterprise Virtualization, virt-v2v will display a warning that VNC passwords are not supported. Note The Red Hat Enterprise Linux 6.2 perl-Sys-Virt update must also be installed to correct CVE-2011-1773 . Bug Fixes BZ# 665883 When converting a guest virtual machine (VM), whose name contained certain characters, virt-v2v would create a converted guest with a corrupted name. Now, virt-v2v will not corrupt guest names. BZ# 671094 There were numerous usability issues when running virt-v2v as a non-root user. This update makes it simpler to run virt-v2v as a non-root user. BZ# 673066 virt-v2v failed to convert a Microsoft Windows guest with Windows Recovery Console installed in a separate partition. Now, virt-v2v will successfully convert a guest with Windows Recovery Console installed in a separate partition by ignoring that partition. BZ# 694364 virt-v2v failed to convert a Red Hat Enterprise Linux guest which did not have the symlink "/boot/grub/menu.lst". With this update, virt-v2v can select a grub configuration file from several places. BZ# 694370 This update removes information about the usage of deprecated command line options in the virt-v2v man page. BZ# 696089 virt-v2v would fail to correctly change the allocation policy, (sparse or preallocated) when converting a guest with QCOW2 image format. The error message "Cannot import VM, The selected disk configuration is not supported" was displayed. With this update, allocation policy changes to a guest with QCOW2 storage will work correctly. BZ# 700759 The options "--network" and "--bridge" can not be used in conjunction when converting a guest, but no error message was displayed. With this update, virt-v2v will now display an error message if the mutually exclusive "--network" and "--bridge" command line options are both specified. BZ# 702007 virt-v2v failed to convert a multi-boot guest, and did not clean up temporary storage and mount points after failure. With this update, virt-v2v will prompt for which operating system to convert from a multi-boot guest, and will correctly clean up if the process fails. BZ# 707261 virt-v2v failed to correctly configure modprobe aliases when converting a VMware ESX guest with VMware Tools installed. With this update, modprobe aliases will be correctly configured. BZ# 727489 When converting a guest with preallocated raw storage using the libvirtxml input method, virt-v2v failed with the erroneous error message "size(X) < usage(Y)". 
This update removes this erroneous error. BZ# 708961 When converting a Red Hat Enterprise Linux guest, virt-v2v did not check that the Cirrus X driver was available before configuring it. With this update, virt-v2v will attempt to install the Cirrus X driver if it is required. BZ# 732421 VirtIO systems do not support the Windows Recovery Console on 32-bit Windows XP. The virt-v2v man page has been updated to note this. On Windows XP Professional x64 Edition, however, if Windows Recovery Console is re-installed after conversion, it will work as expected. BZ# 677870 Placing comments in the guest fstab file by means of the leading "#" symbol caused an "unknown filesystem" error after conversion of a guest. With this update comments can now be used and error messages will not be displayed. Users of virt-v2v should upgrade to this updated package, which fixes these issues and upgrades virt-v2v to version 0.8.3.
null
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/6.2_technical_notes/virt-v2v
Customizing
Customizing Red Hat Developer Hub 1.4 Customizing Red Hat Developer Hub Red Hat Customer Content Services
null
https://docs.redhat.com/en/documentation/red_hat_developer_hub/1.4/html/customizing/index
Chapter 5. Configuring Red Hat Cluster With system-config-cluster
Chapter 5. Configuring Red Hat Cluster With system-config-cluster This chapter describes how to configure Red Hat Cluster software using system-config-cluster , and consists of the following sections: Section 5.1, "Configuration Tasks" Section 5.2, "Starting the Cluster Configuration Tool " Section 5.3, "Configuring Cluster Properties" Section 5.4, "Configuring Fence Devices" Section 5.5, "Adding and Deleting Members" Section 5.6, "Configuring a Failover Domain" Section 5.7, "Adding Cluster Resources" Section 5.8, "Adding a Cluster Service to the Cluster" Section 5.9, "Propagating The Configuration File: New Cluster" Section 5.10, "Starting the Cluster Software" Note While system-config-cluster provides several convenient tools for configuring and managing a Red Hat Cluster, the newer, more comprehensive tool, Conga , provides more convenience and flexibility than system-config-cluster . You may want to consider using Conga instead (refer to Chapter 3, Configuring Red Hat Cluster With Conga and Chapter 4, Managing Red Hat Cluster With Conga ). 5.1. Configuration Tasks Configuring Red Hat Cluster software with system-config-cluster consists of the following steps: Starting the Cluster Configuration Tool , system-config-cluster . Refer to Section 5.2, "Starting the Cluster Configuration Tool " . Configuring cluster properties. Refer to Section 5.3, "Configuring Cluster Properties" . Creating fence devices. Refer to Section 5.4, "Configuring Fence Devices" . Creating cluster members. Refer to Section 5.5, "Adding and Deleting Members" . Creating failover domains. Refer to Section 5.6, "Configuring a Failover Domain" . Creating resources. Refer to Section 5.7, "Adding Cluster Resources" . Creating cluster services. Refer to Section 5.8, "Adding a Cluster Service to the Cluster" . Propagating the configuration file to the other nodes in the cluster. Refer to Section 5.9, "Propagating The Configuration File: New Cluster" . Starting the cluster software. Refer to Section 5.10, "Starting the Cluster Software" .
null
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/4/html/cluster_administration/ch-config-scc-ca
Building your RHEL AI environment
Building your RHEL AI environment Red Hat Enterprise Linux AI 1.1 Creating accounts, initalizing RHEL AI, downloading models, and serving/chat customizations Red Hat RHEL AI Documentation Team
null
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux_ai/1.1/html/building_your_rhel_ai_environment/index
Chapter 5. Viewing and managing Apache Camel applications
Chapter 5. Viewing and managing Apache Camel applications In the Fuse Console's Camel tab, you can view and manage Apache Camel contexts, routes, and dependencies. You can view the following details: A list of all running Camel contexts Detailed information of each Camel context, such as Camel version number and runtime statistics Lists of all routes in each Camel application and their runtime statistics Graphical representation of the running routes along with real-time metrics You can also interact with a Camel application by: Starting and suspending contexts Managing the lifecycle of all Camel applications and their routes, so you can restart, stop, pause, resume, etc. Live tracing and debugging of running routes Browsing and sending messages to Camel endpoints Prerequisite The Camel tab is only available when you connect to a container that uses one or more Camel routes. 5.1. Starting, suspending, or deleting a context In the Camel tab's tree view, click Camel Contexts . Check the box next to one or more contexts in the list. Click Start or Suspend . To delete a context: Stop the context. Click the ellipsis icon and then select Delete from the dropdown menu. Note When you delete a context, you remove it from the deployed application. 5.2. Viewing Camel application details In the Camel tab's tree view, click a Camel application. To view a list of application attributes and values, click Attributes . To view a graphical representation of the application attributes, click Chart and then click Edit to select the attributes that you want to see in the chart. To view inflight and blocked exchanges, click Exchanges . To view application endpoints, click Endpoints . You can filter the list by URL , Route ID , and direction . To view, enable, and disable statistics related to the Camel built-in type conversion mechanism that is used to convert message bodies and message headers to different types, click Type Converters . To view and execute JMX operations, such as adding or updating routes from XML or finding all Camel components available in the classpath, click Operations . 5.3. Viewing a list of the Camel routes and interacting with them To view a list of routes: Click the Camel tab. In the tree view, click the application's routes folder: To start, stop, or delete one or more routes: Check the box next to one or more routes in the list. Click Start or Stop . To delete a route, you must first stop it. Then click the ellipsis icon and select Delete from the dropdown menu. Note When you delete a route, you remove it from the deployed application. You can also select a specific route in the tree view and then click the upper-right menu to start, stop, or delete it. To view a graphical diagram of the routes, click Route Diagram . To view inflight and blocked exchanges, click Exchanges . To view endpoints, click Endpoints . You can filter the list by URL, Route ID, and direction. Click Type Converters to view, enable, and disable statistics related to the Camel built-in type conversion mechanism, which is used to convert message bodies and message headers to different types. To interact with a specific route: In the Camel tab's tree view, select a route. To view a list of route attributes and values, click Attributes . To view a graphical representation of the route attributes, click Chart . You can click Edit to select the attributes that you want to see in the chart. To view inflight and blocked exchanges, click Exchanges .
Click Operations to view and execute JMX operations on the route, such as dumping the route as XML or getting the route's Camel ID value. To trace messages through a route: In the Camel tab's tree view, select a route. Select Trace , and then click Start tracing . To send messages to a route: In the Camel tab's tree view, open the context's endpoints folder and then select an endpoint. Click the Send subtab. Configure the message in JSON or XML format. Click Send . Return to the route's Trace tab to view the flow of messages through the route. 5.4. Debugging a route In the Camel tab's tree view, select a route. Select Debug , and then click Start debugging . To add a breakpoint, select a node in the diagram and then click Add breakpoint . A red dot appears in the node: The node is added to the list of breakpoints: Click the down arrow to step to the node or the Play button to resume running the route. Click the Pause button to suspend all threads for the route. Click Stop debugging when you are done. All breakpoints are cleared.
null
https://docs.redhat.com/en/documentation/red_hat_fuse/7.13/html/managing_fuse_on_openshift/fuse-console-view-camel-all_camelopen
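Because the Fuse Console reads these values over Jolokia/JMX, the same route statistics shown on the Attributes tab can also be fetched from a script. The following is a hedged sketch only: the host, port, credentials, Camel context name, and route ID are all placeholders, and the Jolokia path may differ in your deployment.
# Read two attributes of a managed Camel route MBean through Jolokia (placeholders throughout).
curl -s -u admin:admin \
  "http://localhost:8181/hawtio/jolokia/read/org.apache.camel:context=MyCamel,type=routes,name=%22route1%22/ExchangesCompleted,MeanProcessingTime"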
Chapter 1. OpenShift Container Platform 4.10 Documentation
Chapter 1. OpenShift Container Platform 4.10 Documentation Welcome to the official OpenShift Container Platform 4.10 documentation, where you can learn about OpenShift Container Platform and start exploring its features. To navigate the OpenShift Container Platform 4.10 documentation, you can use one of the following methods: Use the left navigation bar to browse the documentation. Select the task that interests you from the contents of this Welcome page. Start with Architecture and Security and compliance . Then, see the release notes . 1.1. Cluster installer activities Explore these OpenShift Container Platform installation tasks. OpenShift Container Platform installation overview : You can install OpenShift Container Platform on installer-provisioned or user-provisioned infrastructure. The OpenShift Container Platform installation program provides the flexibility to deploy OpenShift Container Platform on a range of different platforms. Install a cluster on Alibaba : You can install OpenShift Container Platform on Alibaba Cloud on installer-provisioned infrastructure. This is currently a Technology Preview feature only. Install a cluster on AWS : You have many installation options when you deploy a cluster on Amazon Web Services (AWS). You can deploy clusters with default settings or custom AWS settings . You can also deploy a cluster on AWS infrastructure that you provisioned yourself. You can modify the provided AWS CloudFormation templates to meet your needs. Install a cluster on Azure : You can deploy clusters with default settings , custom Azure settings , or custom networking settings in Microsoft Azure. You can also provision OpenShift Container Platform into an Azure Virtual Network or use Azure Resource Manager Templates to provision your own infrastructure. Install a cluster on Azure Stack Hub : You can install OpenShift Container Platform on Azure Stack Hub on installer-provisioned infrastructure. Install a cluster on GCP : You can deploy clusters with default settings or custom GCP settings on Google Cloud Platform (GCP). You can also perform a GCP installation where you provision your own infrastructure. Install a cluster on IBM Cloud : You can install OpenShift Container Platform on IBM Cloud on installer-provisioned infrastructure. Install a cluster on IBM Power : You can install OpenShift Container Platform on IBM Power on user-provisioned infrastructure. Install a cluster on VMware vSphere : You can install OpenShift Container Platform on supported versions of vSphere. Install a cluster on VMware Cloud : You can install OpenShift Container Platform on supported versions of VMware Cloud (VMC) on AWS. Install a cluster with z/VM on IBM Z and LinuxONE : You can install OpenShift Container Platform with z/VM on IBM Z and LinuxONE on user-provisioned infrastructure. Install a cluster with RHEL KVM on IBM Z and LinuxONE : You can install OpenShift Container Platform with RHEL KVM on IBM Z and LinuxONE on user-provisioned infrastructure. Install an installer-provisioned cluster on bare metal : You can install OpenShift Container Platform on bare metal with an installer-provisioned architecture. Install a user-provisioned cluster on bare metal : If none of the available platform and cloud provider deployment options meet your needs, you can install OpenShift Container Platform on user-provisioned bare metal infrastructure. 
Install a cluster on Red Hat OpenStack Platform (RHOSP) : You can install a cluster on RHOSP with customizations , with network customizations , or on a restricted network on installer-provisioned infrastructure. You can install a cluster on RHOSP with customizations , with network customizations , or with SR-IOV on user-provisioned infrastructure. Install a cluster on Red Hat Virtualization (RHV) : You can deploy clusters on Red Hat Virtualization (RHV) with a quick install or an install with customizations . Install a cluster in a restricted network : If your cluster that uses user-provisioned infrastructure on AWS , GCP , vSphere , IBM Z and LinuxONE with z/VM , IBM Z and LinuxONE with RHEL KVM , IBM Power , or bare metal does not have full access to the internet, then mirror the OpenShift Container Platform installation images and install a cluster in a restricted network. Install a cluster in an existing network : If you use an existing Virtual Private Cloud (VPC) in AWS or GCP or an existing VNet on Azure, you can install a cluster. Install a private cluster : If your cluster does not require external internet access, you can install a private cluster on AWS , Azure , or GCP . Internet access is still required to access the cloud APIs and installation media. Check installation logs : Access installation logs to evaluate issues that occur during OpenShift Container Platform installation. Access OpenShift Container Platform : Use credentials output at the end of the installation process to log in to the OpenShift Container Platform cluster from the command line or web console. Install Red Hat OpenShift Data Foundation : You can install Red Hat OpenShift Data Foundation as an Operator to provide highly integrated and simplified persistent storage management for containers. 1.2. Developer activities Develop and deploy containerized applications with OpenShift Container Platform. OpenShift Container Platform is a platform for developing and deploying containerized applications. OpenShift Container Platform documentation helps you: Understand OpenShift Container Platform development : Learn the different types of containerized applications, from simple containers to advanced Kubernetes deployments and Operators. Work with projects : Create projects from the OpenShift Container Platform web console or OpenShift CLI ( oc ) to organize and share the software you develop. Work with applications : Use the Developer perspective in the OpenShift Container Platform web console to create and deploy applications . Use the Topology view to see your applications, monitor status, connect and group components, and modify your code base. Connect your workloads to backing services : The Service Binding Operator enables application developers to easily bind workloads with Operator-managed backing services by automatically collecting and sharing binding data with the workloads. The Service Binding Operator improves the development lifecycle with a consistent and declarative service binding method that prevents discrepancies in cluster environments. Use the developer CLI tool ( odo ) : The odo CLI tool lets developers create single or multi-component applications easily and automates deployment, build, and service route configurations. It abstracts complex Kubernetes and OpenShift Container Platform concepts, allowing you to focus on developing your applications. 
Create CI/CD Pipelines : Pipelines are serverless, cloud-native, continuous integration and continuous deployment systems that run in isolated containers. They use standard Tekton custom resources to automate deployments and are designed for decentralized teams that work on microservice-based architecture. Manage your infrastructure and application configurations : GitOps is a declarative way to implement continuous deployment for cloud native applications. GitOps defines infrastructure and application definitions as code. Then, it uses this code to manage multiple workspaces and clusters to simplify the creation of infrastructure and application configurations. GitOps also handles and automates complex deployments at a fast pace, saving time during deployment and release cycles. Deploy Helm charts : Helm is a software package manager that simplifies deployment of applications and services to OpenShift Container Platform clusters. Helm uses a packaging format called charts. A Helm chart is a collection of files that describes the OpenShift Container Platform resources. Understand image builds : Choose from different build strategies (Docker, S2I, custom, and pipeline) that can include different kinds of source materials (from places like Git repositories, local binary inputs, and external artifacts). Then, follow examples of build types from basic builds to advanced builds. Create container images : A container image is the most basic building block in OpenShift Container Platform (and Kubernetes) applications. Defining image streams lets you gather multiple versions of an image in one place as you continue its development. S2I containers let you insert your source code into a base container that is set up to run code of a particular type, such as Ruby, Node.js, or Python. Create deployments : Use Deployment and DeploymentConfig objects to exert fine-grained management over applications. Manage deployments using the Workloads page or OpenShift CLI ( oc ). Learn rolling, recreate, and custom deployment strategies. Create templates : Use existing templates or create your own templates that describe how an application is built or deployed. A template can combine images with descriptions, parameters, replicas, exposed ports and other content that defines how an application can be run or built. Understand Operators : Operators are the preferred method for creating on-cluster applications for OpenShift Container Platform 4.10. Learn about the Operator Framework and how to deploy applications using installed Operators into your projects. Develop Operators : Operators are the preferred method for creating on-cluster applications for OpenShift Container Platform 4.10. Learn the workflow for building, testing, and deploying Operators. Then, create your own Operators based on Ansible or Helm , or configure built-in Prometheus monitoring using the Operator SDK. REST API reference : Learn about OpenShift Container Platform application programming interface endpoints. 1.3. Cluster administrator activities Manage machines, provide services to users, and follow monitoring and logging reports. This documentation helps you: Understand OpenShift Container Platform management : Learn about components of the OpenShift Container Platform 4.10 control plane. See how OpenShift Container Platform control plane and worker nodes are managed and updated through the Machine API and Operators . 1.3.1. 
Manage cluster components Manage machines : Manage machines in your cluster on AWS , Azure , or GCP by deploying health checks and applying autoscaling to machines . Manage container registries : Each OpenShift Container Platform cluster includes a built-in container registry for storing its images. You can also configure a separate Red Hat Quay registry to use with OpenShift Container Platform. The Quay.io web site provides a public container registry that stores OpenShift Container Platform containers and Operators. Manage users and groups : Add users and groups with different levels of permissions to use or modify clusters. Manage authentication : Learn how user, group, and API authentication works in OpenShift Container Platform. OpenShift Container Platform supports multiple identity providers . Manage ingress , API server , and service certificates : OpenShift Container Platform creates certificates by default for the Ingress Operator, the API server, and for services needed by complex middleware applications that require encryption. You might need to change, add, or rotate these certificates. Manage networking : The cluster network in OpenShift Container Platform is managed by the Cluster Network Operator (CNO). The CNO uses iptables rules in kube-proxy to direct traffic between nodes and pods running on those nodes. The Multus Container Network Interface adds the capability to attach multiple network interfaces to a pod. Using network policy features, you can isolate your pods or permit selected traffic. Manage storage : OpenShift Container Platform allows cluster administrators to configure persistent storage using Red Hat OpenShift Data Foundation , AWS Elastic Block Store , NFS , iSCSI , Container Storage Interface (CSI) , and more. You can expand persistent volumes , configure dynamic provisioning , and use CSI to configure , clone , and use snapshots of persistent storage. Manage Operators : Lists of Red Hat, ISV, and community Operators can be reviewed by cluster administrators and installed on their clusters . After you install them, you can run , upgrade , back up, or otherwise manage the Operator on your cluster. 1.3.2. Change cluster components Use custom resource definitions (CRDs) to modify the cluster : Cluster features implemented with Operators can be modified with CRDs. Learn to create a CRD and manage resources from CRDs . Set resource quotas : Choose from CPU, memory, and other system resources to set quotas . Prune and reclaim resources : Reclaim space by pruning unneeded Operators, groups, deployments, builds, images, registries, and cron jobs. Scale and tune clusters : Set cluster limits, tune nodes, scale cluster monitoring, and optimize networking, storage, and routes for your environment. Update a cluster : Use the Cluster Version Operator (CVO) to upgrade your OpenShift Container Platform cluster. If an update is available from the OpenShift Update Service (OSUS), you apply that cluster update from either the OpenShift Container Platform web console or the OpenShift CLI ( oc ). Understanding the OpenShift Update Service : Learn about installing and managing a local OpenShift Update Service for recommending OpenShift Container Platform updates in disconnected environments. 1.3.3. Monitor the cluster Work with OpenShift Logging : Learn about OpenShift Logging and configure different OpenShift Logging types, such as Elasticsearch, Fluentd, and Kibana. Monitor : Learn to configure the monitoring stack . 
After configuring monitoring, use the web console to access monitoring dashboards . In addition to infrastructure metrics, you can also scrape and view metrics for your own services. Remote health monitoring : OpenShift Container Platform collects anonymized aggregated information about your cluster. Using Telemetry and the Insights Operator, this data is received by Red Hat and used to improve OpenShift Container Platform. You can view the data collected by remote health monitoring .
null
https://docs.redhat.com/en/documentation/openshift_container_platform/4.10/html/about/welcome-index
3.2. SystemTap Scripts
3.2. SystemTap Scripts For the most part, SystemTap scripts are the foundation of each SystemTap session. SystemTap scripts instruct SystemTap on what type of information to collect, and what to do once that information is collected. As stated in Chapter 3, Understanding How SystemTap Works , SystemTap scripts are made up of two components: events and handlers . Once a SystemTap session is underway, SystemTap monitors the operating system for the specified events and executes the handlers as they occur. Note An event and its corresponding handler are collectively called a probe . A SystemTap script can have multiple probes. A probe's handler is commonly referred to as a probe body . In terms of application development, using events and handlers is similar to instrumenting the code by inserting diagnostic print statements in a program's sequence of commands. These diagnostic print statements allow you to view a history of commands executed once the program is run. SystemTap scripts allow insertion of the instrumentation code without recompilation of the code and allow more flexibility with regard to handlers. Events serve as the triggers for handlers to run; handlers can be specified to record specified data and print it in a certain manner. Format SystemTap scripts use the file extension .stp , and contain probes written in the following format: SystemTap supports multiple events per probe; multiple events are delimited by a comma ( , ). If multiple events are specified in a single probe, SystemTap will execute the handler when any of the specified events occur. Each probe has a corresponding statement block . This statement block is enclosed in braces ( { } ) and contains the statements to be executed per event. SystemTap executes these statements in sequence; special separators or terminators are generally not necessary between multiple statements. Note Statement blocks in SystemTap scripts follow the same syntax and semantics as the C programming language. A statement block can be nested within another statement block. SystemTap allows you to write functions to factor out code to be used by a number of probes. Thus, rather than repeatedly writing the same series of statements in multiple probes, you can just place the instructions in a function , as in: The statements in function_name are executed when the probe for event executes. The arguments are optional values passed into the function. Important Section 3.2, "SystemTap Scripts" is designed to introduce readers to the basics of SystemTap scripts. To understand SystemTap scripts better, it is advisable that you refer to Chapter 4, Useful SystemTap Scripts ; each section therein provides a detailed explanation of the script, its events, handlers, and expected output. 3.2.1. Event SystemTap events can be broadly classified into two types: synchronous and asynchronous . Synchronous Events A synchronous event occurs when any process executes an instruction at a particular location in kernel code. This gives other events a reference point from which more contextual data may be available. Examples of synchronous events include: syscall. system_call The entry to the system call system_call . If the exit from a syscall is desired, append a .return to the event to monitor the exit of the system call instead. For example, to specify the entry and exit of the system call close , use syscall.close and syscall.close.return respectively. vfs. file_operation The entry to the file_operation event for Virtual File System (VFS).
Similar to the syscall event, appending a .return to the event monitors the exit of the file_operation operation. kernel.function(" function ") The entry to the kernel function function . For example, kernel.function("sys_open") refers to the "event" that occurs when the kernel function sys_open is called by any thread in the system. To specify the return of the kernel function sys_open , append the .return string to the event statement; that is, kernel.function("sys_open").return . When defining probe events, you can use the asterisk ( * ) for wildcards. You can also trace the entry or exit of a function in a kernel source file. Consider the following example: Example 3.1. wildcards.stp In the example, the first probe's event specifies the entry of ALL functions in the kernel source file net/socket.c . The second probe specifies the exit of all those functions. Note that in this example, there are no statements in the handler; as such, no information will be collected or displayed. kernel.trace(" tracepoint ") The static probe for tracepoint . Recent kernels (2.6.30 and newer) include instrumentation for specific events in the kernel. These events are statically marked with tracepoints. One example of a tracepoint available in SystemTap is kernel.trace("kfree_skb") , which indicates each time a network buffer is freed in the kernel. module(" module ").function(" function ") Allows you to probe functions within modules. For example: Example 3.2. moduleprobe.stp The first probe in Example 3.2, "moduleprobe.stp" points to the entry of all functions for the ext3 module. The second probe points to the exits of all functions for that same module; the use of the .return suffix is similar to kernel.function() . Note that the probes in Example 3.2, "moduleprobe.stp" do not contain statements in the probe handlers, and as such will not print any useful data (as in Example 3.1, "wildcards.stp" ). A system's kernel modules are typically located in /lib/modules/ kernel_version , where kernel_version refers to the currently loaded kernel version. Modules use the file name extension .ko . Asynchronous Events Asynchronous events are not tied to a particular instruction or location in code. This family of probe points consists mainly of counters, timers, and similar constructs. Examples of asynchronous events include: begin The startup of a SystemTap session; that is, as soon as the SystemTap script is run. end The end of a SystemTap session. timer events An event that specifies a handler to be executed periodically. For example: Example 3.3. timer-s.stp Example 3.3, "timer-s.stp" is an example of a probe that prints hello world every 4 seconds. Note that you can also use the following timer events: timer.ms( milliseconds ) timer.us( microseconds ) timer.ns( nanoseconds ) timer.hz( hertz ) timer.jiffies( jiffies ) When used in conjunction with other probes that collect information, timer events allow you to print out periodic updates and see how that information changes over time. Important SystemTap supports the use of a large collection of probe events. For more information about supported events, refer to man stapprobes . The SEE ALSO section of man stapprobes also contains links to other man pages that discuss supported events for specific subsystems and components.
[ "probe event { statements }", "function function_name ( arguments ) { statements } probe event { function_name ( arguments )}", "probe kernel.function(\"*@net/socket.c\") { } probe kernel.function(\"*@net/socket.c\").return { }", "probe module(\"ext3\").function(\"*\") { } probe module(\"ext3\").function(\"*\").return { }", "probe timer.s(4) { printf(\"hello world\\n\") }" ]
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/systemtap_beginners_guide/scripts
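The pieces described above can be combined into one small script. The sketch below is not one of the guide's examples: it pairs a begin/end probe, a synchronous syscall probe, and a timer event, and it assumes the kernel debuginfo packages that stap normally requires are installed.
# Write a small .stp script and run it with stap; press Ctrl-C to trigger the end probe.
cat > opens.stp << 'EOF'
probe begin { printf("tracing open() calls...\n") }
probe syscall.open { printf("%s(%d) opened %s\n", execname(), pid(), filename) }
probe timer.s(4) { printf("still tracing\n") }
probe end { printf("done\n") }
EOF
stap opens.stp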
Schedule and quota APIs
Schedule and quota APIs OpenShift Container Platform 4.13 Reference guide for schedule and quota APIs Red Hat OpenShift Documentation Team
null
https://docs.redhat.com/en/documentation/openshift_container_platform/4.13/html-single/schedule_and_quota_apis/index
Part II. Device Drivers
Part II. Device Drivers This part provides a comprehensive listing of all device drivers that are new or have been updated in Red Hat Enterprise Linux 7.6.
null
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/7/html/7.6_release_notes/part-red_hat_enterprise_linux-7.6_release_notes-device_drivers
Chapter 1. Adding secrets to GitLab CI for secure integration with external tools
Chapter 1. Adding secrets to GitLab CI for secure integration with external tools Prerequisites Before you configure GitLab CI, ensure you have the following: Admin access to your GitLab repository and CI/CD settings. Container registry credentials for pulling container images from Quay.io, JFrog Artifactory, or Sonatype Nexus. Authentication details for specific GitLab CI tasks: For ACS security tasks : ROX Central server endpoint ROX API token For SBOM and artifact signing tasks : Cosign signing key password Private key and public key Trustification URL Client ID and secret Supported CycloneDX version Note The credentials and other details are already Base64-encoded, so you do not need to encode them again. You can find these credentials in your private.env file, which you created during RHTAP installation. 1.1. Option 1: Using GitLab UI Procedure Log in to GitLab and open your source repository. Expand the Setting menu and select CI/CD . In the Variables section, select Expand , and then select Add variable . Enter the following details: Under Flags , select Mask variable to hide sensitive values. In the Key field, enter MY_GITLAB_TOKEN . In the Value field, enter the token associated with your GitLab account. Repeat steps 3-4 to add the required variables: Variable Description Provide image registry credentials for only one image registry. QUAY_IO_CREDS_USR Username for accessing Quay.io repository. QUAY_IO_CREDS_PSW Password for accessing Quay.io repository. ARTIFACTORY_IO_CREDS_USR Username for accessing JFrog Artifactory repository. ARTIFACTORY_IO_CREDS_PSW Password for accessing JFrog Artifactory repository. NEXUS_IO_CREDS_USR Username for accessing Sonatype Nexus repository. NEXUS_IO_CREDS_PSW Password for accessing Sonatype Nexus repository. Set these variables if Gitlab CI runners do not run on the same cluster as the RHTAP instance. REKOR_HOST URL of your Rekor server. TUF_MIRROR URL of your TUF service. GitOps configuration for GitLab GITOPS_AUTH_PASSWORD The token the system uses to update the GitOps repository for newly built images. GITOPS_AUTH_USERNAME (optional) The parameter required for GitLab to work with Jenkins. You also need to uncomment a line with this parameter in a Jenkinsfile: GITOPS_AUTH_USERNAME = credentials('GITOPS_AUTH_USERNAME'). By default, this line is commented out. Variable required for ACS tasks. ROX_CENTRAL_ENDPOINT Endpoint for the ROX Central server. ROX_API_TOKEN API token for accessing the ROX server. Variables required for SBOM tasks. COSIGN_SECRET_PASSWORD Password for Cosign signing key. COSIGN_SECRET_KEY Private key for Cosign. COSIGN_PUBLIC_KEY Public key for Cosign. TRUSTIFICATION_BOMBASTIC_API_URL URL for Trustification Bombastic API used in SBOM generation. TRUSTIFICATION_OIDC_ISSUER_URL OIDC issuer URL used for authentication when interacting with the Trustification Bombastic API. TRUSTIFICATION_OIDC_CLIENT_ID Client ID for authenticating to the Trustification Bombastic API using OIDC. TRUSTIFICATION_OIDC_CLIENT_SECRET Client secret used alongside the client ID to authenticate to the Trustification Bombastic API. TRUSTIFICATION_SUPPORTED_CYCLONEDX_VERSION Specifies the CycloneDX SBOM version that is supported and generated by the system. Select Add variable . Rerun the last pipeline run to verify the secrets are applied correctly. Alternatively, switch to you application's source repository in GitLab, make a minor change, and commit it to trigger a new pipeline run. 1.2. 
Option 2: Using CLI Procedure Create a project with two files in your preferred text editor, such as Visual Studio Code: env_vars.sh glab-set-vars Update the env_vars.sh file with the following environment variables: # env_vars.sh # GitLab credentials export MY_GITLAB_TOKEN="your_gitlab_token_here" export MY_GITLAB_USER="your_gitlab_username_here" export GITOPS_AUTH_PASSWORD="your_OpenShift_GitOps_password_here" export GITOPS_AUTH_USERNAME="your_OpenShift_GitOps_username_here" // Provide the credentials for the image registry you use. # Quay.io credentials export QUAY_IO_CREDS_USR="your_quay_username_here" export QUAY_IO_CREDS_PSW="your_quay_password_here" # JFrog Artifactory credenditals export ARTIFACTORY_IO_CREDS_USR="your_artifactory_username_here" export ARTIFACTORY_IO_CREDS_PSW="your_artifactory_password_here" # Sonatype Nexus credentials export NEXUS_IO_CREDS_USR="your_nexus_username_here" export NEXUS_IO_CREDS_PSW="your_nexus_password_here" # Rekor and TUF routes export REKOR_HOST="your rekor server url here" export TUF_MIRROR="your tuf service url here" // Variables required for ACS tasks # ROX variables export ROX_CENTRAL_ENDPOINT="your_rox_central_endpoint_here" export ROX_API_TOKEN="your_rox_api_token_here" // Set these variables if Gitlab CI runners do not run on the same cluster as the {ProductShortName} instance. export ROX_CENTRAL_ENDPOINT="your_rox_central_endpoint_here" export ROX_API_TOKEN="your_rox_api_token_here" // Variables required for SBOM tasks. # Cosign secrets export COSIGN_SECRET_PASSWORD="your_cosign_secret_password_here" export COSIGN_SECRET_KEY="your_cosign_secret_key_here" export COSIGN_PUBLIC_KEY="your_cosign_public_key_here" # Trustification credentials export TRUSTIFICATION_BOMBASTIC_API_URL="your__BOMBASTIC_API_URL_here" export TRUSTIFICATION_OIDC_ISSUER_URL="your_OIDC_ISSUER_URL_here" export TRUSTIFICATION_OIDC_CLIENT_ID="your_OIDC_CLIENT_ID_here" export TRUSTIFICATION_OIDC_CLIENT_SECRET="your_OIDC_CLIENT_SECRET_here" export TRUSTIFICATION_SUPPORTED_CYCLONEDX_VERSION="your_SUPPORTED_CYCLONEDX_VERSION_here" Update the glab-set-vars file with the following information: #!/bin/bash SCRIPTDIR="USD(cd "USD(dirname "USD{BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)" if [ USD# -ne 1 ]; then echo "Missing param, provide gitlab repo name" echo "Note: This script uses MY_GITLAB_TOKEN and MY_GITLAB_USER env vars" exit fi REPO=USD1 HEADER="PRIVATE-TOKEN: USDMY_GITLAB_TOKEN" URL=https://gitlab.com/api/v4/projects # Look up the project ID so we can use it below PID=USD(curl -s -L --header "USDHEADER" "USDURL/USDMY_GITLAB_USER%2FUSDREPO" | jq ".id") function setVars() { NAME=USD1 VALUE=USD2 MASKED=USD{3:-true} echo "setting USDNAME in https://gitlab.com/USDMY_GITLAB_USER/USDREPO" # Delete first because if the secret already exists then its value # won't be changed by the POST below curl -s --request DELETE --header "USDHEADER" "USDURL/USDPID/variables/USDNAME" # Set the new key/value curl -s --request POST --header "USDHEADER" "USDURL/USDPID/variables" \ --form "key=USDNAME" --form "value=USDVALUE" --form "masked=USDMASKED" | jq } setVars ROX_CENTRAL_ENDPOINT USDROX_CENTRAL_ENDPOINT setVars ROX_API_TOKEN USDROX_API_TOKEN setVars GITOPS_AUTH_PASSWORD USDMY_GITLAB_TOKEN setVars GITOPS_AUTH_USERNAME USDMY_GITLAB_USER setVars QUAY_IO_CREDS_USR USDQUAY_IO_CREDS_USR setVars QUAY_IO_CREDS_PSW USDQUAY_IO_CREDS_PSW setVars COSIGN_SECRET_PASSWORD USDCOSIGN_SECRET_PASSWORD setVars COSIGN_SECRET_KEY USDCOSIGN_SECRET_KEY setVars COSIGN_PUBLIC_KEY USDCOSIGN_PUBLIC_KEY setVars 
TRUSTIFICATION_BOMBASTIC_API_URL "USDTRUSTIFICATION_BOMBASTIC_API_URL" setVars TRUSTIFICATION_OIDC_ISSUER_URL "USDTRUSTIFICATION_OIDC_ISSUER_URL" setVars TRUSTIFICATION_OIDC_CLIENT_ID "USDTRUSTIFICATION_OIDC_CLIENT_ID" setVars TRUSTIFICATION_OIDC_CLIENT_SECRET "USDTRUSTIFICATION_OIDC_CLIENT_SECRET" setVars TRUSTIFICATION_SUPPORTED_CYCLONEDX_VERSION "USDTRUSTIFICATION_SUPPORTED_CYCLONEDX_VERSION" setVars ARTIFACTORY_IO_CREDS_USR USDARTIFACTORY_IO_CREDS_USR setVars ARTIFACTORY_IO_CREDS_PSW USDARTIFACTORY_IO_CREDS_PSW setVars NEXUS_IO_CREDS_USR USDNEXUS_IO_CREDS_USR setVars NEXUS_IO_CREDS_PSW USDNEXUS_IO_CREDS_PSW setVars REKOR_HOST USDREKOR_HOST setVars TUF_MIRROR USDTUF_MIRROR (Optional) Modify the glab-set-vars file to disable variables that are not required. For example, to disable setVars ROX_API_TOKEN USDROX_API_TOKEN , add false to it. ROX_API_TOKEN USDROX_API_TOKEN false Load the environment variables into your current shell session: source env_vars.sh Make the glab-set-vars script executable, and run it with your repository name to set the variables in your GitLab repository. chmod +x glab-set-vars ./glab-set-vars your_repository_name Rerun the last pipeline run to verify the secrets are applied correctly. Alternatively, switch to your application's source repository in GitLab, make a minor change, and commit it to trigger a new pipeline run. Revised on 2025-02-12 15:08:44 UTC
[ "env_vars.sh GitLab credentials export MY_GITLAB_TOKEN=\"your_gitlab_token_here\" export MY_GITLAB_USER=\"your_gitlab_username_here\" export GITOPS_AUTH_PASSWORD=\"your_OpenShift_GitOps_password_here\" export GITOPS_AUTH_USERNAME=\"your_OpenShift_GitOps_username_here\" // Provide the credentials for the image registry you use. Quay.io credentials export QUAY_IO_CREDS_USR=\"your_quay_username_here\" export QUAY_IO_CREDS_PSW=\"your_quay_password_here\" JFrog Artifactory credenditals export ARTIFACTORY_IO_CREDS_USR=\"your_artifactory_username_here\" export ARTIFACTORY_IO_CREDS_PSW=\"your_artifactory_password_here\" Sonatype Nexus credentials export NEXUS_IO_CREDS_USR=\"your_nexus_username_here\" export NEXUS_IO_CREDS_PSW=\"your_nexus_password_here\" Rekor and TUF routes export REKOR_HOST=\"your rekor server url here\" export TUF_MIRROR=\"your tuf service url here\" // Variables required for ACS tasks ROX variables export ROX_CENTRAL_ENDPOINT=\"your_rox_central_endpoint_here\" export ROX_API_TOKEN=\"your_rox_api_token_here\" // Set these variables if Gitlab CI runners do not run on the same cluster as the {ProductShortName} instance. export ROX_CENTRAL_ENDPOINT=\"your_rox_central_endpoint_here\" export ROX_API_TOKEN=\"your_rox_api_token_here\" // Variables required for SBOM tasks. Cosign secrets export COSIGN_SECRET_PASSWORD=\"your_cosign_secret_password_here\" export COSIGN_SECRET_KEY=\"your_cosign_secret_key_here\" export COSIGN_PUBLIC_KEY=\"your_cosign_public_key_here\" Trustification credentials export TRUSTIFICATION_BOMBASTIC_API_URL=\"your__BOMBASTIC_API_URL_here\" export TRUSTIFICATION_OIDC_ISSUER_URL=\"your_OIDC_ISSUER_URL_here\" export TRUSTIFICATION_OIDC_CLIENT_ID=\"your_OIDC_CLIENT_ID_here\" export TRUSTIFICATION_OIDC_CLIENT_SECRET=\"your_OIDC_CLIENT_SECRET_here\" export TRUSTIFICATION_SUPPORTED_CYCLONEDX_VERSION=\"your_SUPPORTED_CYCLONEDX_VERSION_here\"", "#!/bin/bash SCRIPTDIR=\"USD(cd \"USD(dirname \"USD{BASH_SOURCE[0]}\")\" > /dev/null 2>&1 && pwd)\" if [ USD# -ne 1 ]; then echo \"Missing param, provide gitlab repo name\" echo \"Note: This script uses MY_GITLAB_TOKEN and MY_GITLAB_USER env vars\" exit fi REPO=USD1 HEADER=\"PRIVATE-TOKEN: USDMY_GITLAB_TOKEN\" URL=https://gitlab.com/api/v4/projects Look up the project ID so we can use it below PID=USD(curl -s -L --header \"USDHEADER\" \"USDURL/USDMY_GITLAB_USER%2FUSDREPO\" | jq \".id\") function setVars() { NAME=USD1 VALUE=USD2 MASKED=USD{3:-true} echo \"setting USDNAME in https://gitlab.com/USDMY_GITLAB_USER/USDREPO\" # Delete first because if the secret already exists then its value # won't be changed by the POST below curl -s --request DELETE --header \"USDHEADER\" \"USDURL/USDPID/variables/USDNAME\" # Set the new key/value curl -s --request POST --header \"USDHEADER\" \"USDURL/USDPID/variables\" --form \"key=USDNAME\" --form \"value=USDVALUE\" --form \"masked=USDMASKED\" | jq } setVars ROX_CENTRAL_ENDPOINT USDROX_CENTRAL_ENDPOINT setVars ROX_API_TOKEN USDROX_API_TOKEN setVars GITOPS_AUTH_PASSWORD USDMY_GITLAB_TOKEN setVars GITOPS_AUTH_USERNAME USDMY_GITLAB_USER setVars QUAY_IO_CREDS_USR USDQUAY_IO_CREDS_USR setVars QUAY_IO_CREDS_PSW USDQUAY_IO_CREDS_PSW setVars COSIGN_SECRET_PASSWORD USDCOSIGN_SECRET_PASSWORD setVars COSIGN_SECRET_KEY USDCOSIGN_SECRET_KEY setVars COSIGN_PUBLIC_KEY USDCOSIGN_PUBLIC_KEY setVars TRUSTIFICATION_BOMBASTIC_API_URL \"USDTRUSTIFICATION_BOMBASTIC_API_URL\" setVars TRUSTIFICATION_OIDC_ISSUER_URL \"USDTRUSTIFICATION_OIDC_ISSUER_URL\" setVars TRUSTIFICATION_OIDC_CLIENT_ID 
\"USDTRUSTIFICATION_OIDC_CLIENT_ID\" setVars TRUSTIFICATION_OIDC_CLIENT_SECRET \"USDTRUSTIFICATION_OIDC_CLIENT_SECRET\" setVars TRUSTIFICATION_SUPPORTED_CYCLONEDX_VERSION \"USDTRUSTIFICATION_SUPPORTED_CYCLONEDX_VERSION\" setVars ARTIFACTORY_IO_CREDS_USR USDARTIFACTORY_IO_CREDS_USR setVars ARTIFACTORY_IO_CREDS_PSW USDARTIFACTORY_IO_CREDS_PSW setVars NEXUS_IO_CREDS_USR USDNEXUS_IO_CREDS_USR setVars NEXUS_IO_CREDS_PSW USDNEXUS_IO_CREDS_PSW setVars REKOR_HOST USDREKOR_HOST setVars TUF_MIRROR USDTUF_MIRROR", "ROX_API_TOKEN USDROX_API_TOKEN false", "source env_vars.sh", "chmod +x glab-set-vars ./glab-set-vars your_repository_name" ]
https://docs.redhat.com/en/documentation/red_hat_trusted_application_pipeline/1.4/html/configuring_gitlab_ci/adding-secrets-to-gitlab-ci-for-secure-integration-with-external-tools_gitlab-ci
20.5.2. Port Forwarding
20.5.2. Port Forwarding SSH can secure otherwise insecure TCP/IP protocols via port forwarding. When using this technique, the SSH server becomes an encrypted conduit to the SSH client. Port forwarding works by mapping a local port on the client to a remote port on the server. SSH can map any port from the server to any port on the client; port numbers do not need to match for this technique to work. To create a TCP/IP port forwarding channel which listens for connections on the localhost, use the following command: Note Setting up port forwarding to listen on ports below 1024 requires root level access. To check email on a server called mail.example.com using POP3 through an encrypted connection, use the following command: Once the port forwarding channel is in place between the client machine and the mail server, direct a POP3 mail client to use port 1100 on the localhost to check for new mail. Any requests sent to port 1100 on the client system are directed securely to the mail.example.com server. If mail.example.com is not running an SSH server, but another machine on the same network is, SSH can still be used to secure part of the connection. However, a slightly different command is necessary: In this example, POP3 requests from port 1100 on the client machine are forwarded through the SSH connection on port 22 to the SSH server, other.example.com . Then, other.example.com connects to port 110 on mail.example.com to check for new mail. Note, when using this technique only the connection between the client system and other.example.com SSH server is secure. Port forwarding can also be used to get information securely through network firewalls. If the firewall is configured to allow SSH traffic via its standard port (22) but blocks access to other ports, a connection between two hosts using the blocked ports is still possible by redirecting their communication over an established SSH connection. Note Using port forwarding to forward connections in this manner allows any user on the client system to connect to that service. If the client system becomes compromised, the attacker also has access to forwarded services. System administrators concerned about port forwarding can disable this functionality on the server by specifying a No parameter for the AllowTcpForwarding line in /etc/ssh/sshd_config and restarting the sshd service.
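As a sketch of the server-side change described above, assuming the stock configuration file and init script locations on Red Hat Enterprise Linux, add the following line to /etc/ssh/sshd_config:

AllowTcpForwarding no

and then restart the daemon:

service sshd restart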
[ "ssh -L local-port : remote-hostname : remote-port username @ hostname", "ssh -L 1100:mail.example.com:110 mail.example.com", "ssh -L 1100:mail.example.com:110 other.example.com" ]
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/4/html/reference_guide/s2-ssh-beyondshell-TCPIP
Chapter 26. pid
Chapter 26. pid The process ID of the logging entity, if available. Data type: keyword
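A hypothetical fragment of a log record, shown only to illustrate where the field appears; the values are invented and most other fields are omitted:

{"message": "connection accepted", "level": "info", "pid": "2094"}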
null
https://docs.redhat.com/en/documentation/openshift_container_platform/4.10/html/logging/pid
20.6. Compatibility with Older Systems
20.6. Compatibility with Older Systems If an ACL has been set on any file on a given file system, that file system has the ext_attr attribute. This attribute can be seen using the following command: A file system that has acquired the ext_attr attribute can be mounted with older kernels, but those kernels do not enforce any ACLs which have been set. Versions of the e2fsck utility included in version 1.22 and higher of the e2fsprogs package (including the versions in Red Hat Enterprise Linux 2.1 and 4) can check a file system with the ext_attr attribute. Older versions refuse to check it.
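For example, to check a single device for the attribute (a sketch; /dev/sda1 is only an example device name):

tune2fs -l /dev/sda1 | grep 'Filesystem features'

If ACLs have been set on the file system, ext_attr appears in the list of features printed by this command.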
[ "tune2fs -l filesystem-device" ]
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/storage_administration_guide/acls-compat-older
User and group APIs
User and group APIs OpenShift Container Platform 4.14 Reference guide for user and group APIs Red Hat OpenShift Documentation Team
null
https://docs.redhat.com/en/documentation/openshift_container_platform/4.14/html/user_and_group_apis/index
17.5. Using Cross-Pair Certificates
17.5. Using Cross-Pair Certificates In the late 1990s, as the US government began enhancing its public key infrastructure, it became apparent that branches of government with their own, separate PKI deployments still needed to be able to recognize and trust each other's certificates as if the certificates were issued from their own CA. (The method of getting certificates trusted outside a network for external clients to use is a serious, not easily resolved issue for any PKI administrator.) The US government devised a standard for issuing cross-pair certificates called the Federal Bridge Certificate Authority. These certificates are also called bridge certificates , for obvious reasons. Bridge or cross-pair certificates are CA signing certificates that are framed as dual certificate pairs, similar to encryption and signing certificate pairs for users, except that each certificate in the pair is issued by a different CA. Each partner CA stores the other CA's signing certificate in its database, so all of the certificates issued within the other PKI are trusted and recognized. Bridging certificates honor certificates issued by a CA that is not chained to the root CA in its own PKI. By establishing a trust between the Certificate System CA and another CA through a cross-pair CA certificate, the cross-pair certificate can be downloaded and used to trust the certificates issued by the other CA, just as downloading and installing a single CA certificate trusts all certificates issued by the CA. The Certificate System can issue, import, and publish cross-pair CA certificates. A special profile must be created for issuing cross-pair certificates, and then the certificates can be requested and installed for the CA using the Certificate Wizard for the CA subsystem. For more information on creating cross-pair certificate profiles, see the Configuring Cross-Pair profiles section in the Red Hat Certificate System Planning, Installation, and Deployment Guide . For more information on publishing cross-pair certificates, see Section 9.9, "Publishing Cross-Pair Certificates" . 17.5.1. Installing Cross-Pair Certificates Both cross-pair certificates can be imported into the Certificate System databases using the certutil tool (a sketch of the import appears below) or by selecting the Cross-Pair Certificates option from the Certificate Setup Wizard, as described in Section 17.6.1, "Installing Certificates in the Certificate System Database" . When both certificates have been imported into the database, a crossCertificatePair entry is formed and stored in the database. The original individual cross-pair CA certificates are deleted once the crossCertificatePair entry is created. 17.5.2. Searching for Cross-Pair Certificates Both CAs in bridge certificates can store or publish the cross-pair certificates as a crossCertificatePair entry in an LDAP database. The Certificate Manager's internal database can be searched for the crossCertificatePair entry with ldapsearch .
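The following is a sketch of the certutil import mentioned in Section 17.5.1; the database directory, nickname, trust flags, and file name are illustrative and depend on your deployment, and the certificate file is assumed to be PEM-encoded:

certutil -A -d /var/lib/pki/pki-tomcat/alias -n "Partner CA Cross-Pair" -t "CT,C,C" -a -i partner-ca-crosspair.crt

Run the command once for each certificate in the pair; after both certificates are imported, the crossCertificatePair entry is formed as described above.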
[ "/usr/lib[64]/mozldap/ldapsearch -D \"cn=directory manager\" -w secret -p 389 -h server.example.com -b \"o=server.example.com-pki-ca\" -s sub \"(crossCertificatePair=*)\"" ]
https://docs.redhat.com/en/documentation/red_hat_certificate_system/10/html/administration_guide/using_cross_pair_certificates
Red Hat Developer Hub support
Red Hat Developer Hub support If you experience difficulty with a procedure described in this documentation, visit the Red Hat Customer Portal . You can use the Red Hat Customer Portal for the following purposes: To search or browse through the Red Hat Knowledgebase of technical support articles about Red Hat products. To create a support case for Red Hat Global Support Services (GSS). For support case creation, select Red Hat Developer Hub as the product and select the appropriate product version.
null
https://docs.redhat.com/en/documentation/red_hat_developer_hub/1.2/html/configuring_plugins_in_red_hat_developer_hub/snip-customer-support-info_plugin-rhdh
Chapter 2. Understanding Operators
Chapter 2. Understanding Operators 2.1. What are Operators? Conceptually, Operators take human operational knowledge and encode it into software that is more easily shared with consumers. Operators are pieces of software that ease the operational complexity of running another piece of software. They act like an extension of the software vendor's engineering team, monitoring a Kubernetes environment (such as OpenShift Container Platform) and using its current state to make decisions in real time. Advanced Operators are designed to handle upgrades seamlessly, react to failures automatically, and not take shortcuts, like skipping a software backup process to save time. More technically, Operators are a method of packaging, deploying, and managing a Kubernetes application. A Kubernetes application is an app that is both deployed on Kubernetes and managed using the Kubernetes APIs and kubectl or oc tooling. To be able to make the most of Kubernetes, you require a set of cohesive APIs to extend in order to service and manage your apps that run on Kubernetes. Think of Operators as the runtime that manages this type of app on Kubernetes. 2.1.1. Why use Operators? Operators provide: Repeatability of installation and upgrade. Constant health checks of every system component. Over-the-air (OTA) updates for OpenShift components and ISV content. A place to encapsulate knowledge from field engineers and spread it to all users, not just one or two. Why deploy on Kubernetes? Kubernetes (and by extension, OpenShift Container Platform) contains all of the primitives needed to build complex distributed systems - secret handling, load balancing, service discovery, autoscaling - that work across on-premises and cloud providers. Why manage your app with Kubernetes APIs and kubectl tooling? These APIs are feature rich, have clients for all platforms and plug into the cluster's access control/auditing. An Operator uses the Kubernetes extension mechanism, custom resource definitions (CRDs), so your custom object, for example MongoDB , looks and acts just like the built-in, native Kubernetes objects. How do Operators compare with service brokers? A service broker is a step towards programmatic discovery and deployment of an app. However, because it is not a long running process, it cannot execute Day 2 operations like upgrade, failover, or scaling. Customizations and parameterization of tunables are provided at install time, versus an Operator that is constantly watching the current state of your cluster. Off-cluster services are a good match for a service broker, although Operators exist for these as well. 2.1.2. Operator Framework The Operator Framework is a family of tools and capabilities to deliver on the customer experience described above. It is not just about writing code; testing, delivering, and updating Operators is just as important. The Operator Framework components consist of open source tools to tackle these problems: Operator SDK The Operator SDK assists Operator authors in bootstrapping, building, testing, and packaging their own Operator based on their expertise without requiring knowledge of Kubernetes API complexities. Operator Lifecycle Manager Operator Lifecycle Manager (OLM) controls the installation, upgrade, and role-based access control (RBAC) of Operators in a cluster. Deployed by default in OpenShift Container Platform 4.9. 
Operator Registry The Operator Registry stores cluster service versions (CSVs) and custom resource definitions (CRDs) for creation in a cluster and stores Operator metadata about packages and channels. It runs in a Kubernetes or OpenShift cluster to provide this Operator catalog data to OLM. OperatorHub OperatorHub is a web console for cluster administrators to discover and select Operators to install on their cluster. It is deployed by default in OpenShift Container Platform. These tools are designed to be composable, so you can use any that are useful to you. 2.1.3. Operator maturity model The level of sophistication of the management logic encapsulated within an Operator can vary. This logic is also in general highly dependent on the type of the service represented by the Operator. One can however generalize the scale of the maturity of the encapsulated operations of an Operator for certain set of capabilities that most Operators can include. To this end, the following Operator maturity model defines five phases of maturity for generic day two operations of an Operator: Figure 2.1. Operator maturity model The above model also shows how these capabilities can best be developed through the Helm, Go, and Ansible capabilities of the Operator SDK. 2.2. Operator Framework packaging format This guide outlines the packaging format for Operators supported by Operator Lifecycle Manager (OLM) in OpenShift Container Platform. Note Support for the legacy package manifest format for Operators is removed in OpenShift Container Platform 4.8 and later. Existing Operator projects in the package manifest format can be migrated to the bundle format by using the Operator SDK pkgman-to-bundle command. See Migrating package manifest projects to bundle format for more details. 2.2.1. Bundle format The bundle format for Operators is a packaging format introduced by the Operator Framework. To improve scalability and to better enable upstream users hosting their own catalogs, the bundle format specification simplifies the distribution of Operator metadata. An Operator bundle represents a single version of an Operator. On-disk bundle manifests are containerized and shipped as a bundle image , which is a non-runnable container image that stores the Kubernetes manifests and Operator metadata. Storage and distribution of the bundle image is then managed using existing container tools like podman and docker and container registries such as Quay. Operator metadata can include: Information that identifies the Operator, for example its name and version. Additional information that drives the UI, for example its icon and some example custom resources (CRs). Required and provided APIs. Related images. When loading manifests into the Operator Registry database, the following requirements are validated: The bundle must have at least one channel defined in the annotations. Every bundle has exactly one cluster service version (CSV). If a CSV owns a custom resource definition (CRD), that CRD must exist in the bundle. 2.2.1.1. Manifests Bundle manifests refer to a set of Kubernetes manifests that define the deployment and RBAC model of the Operator. A bundle includes one CSV per directory and typically the CRDs that define the owned APIs of the CSV in its /manifests directory. 
Example bundle format layout etcd ├── manifests │ ├── etcdcluster.crd.yaml │ └── etcdoperator.clusterserviceversion.yaml │ └── secret.yaml │ └── configmap.yaml └── metadata └── annotations.yaml └── dependencies.yaml Additionally supported objects The following object types can also be optionally included in the /manifests directory of a bundle: Supported optional object types ClusterRole ClusterRoleBinding ConfigMap ConsoleYamlSample PodDisruptionBudget PriorityClass PrometheusRule Role RoleBinding Secret Service ServiceAccount ServiceMonitor VerticalPodAutoscaler When these optional objects are included in a bundle, Operator Lifecycle Manager (OLM) can create them from the bundle and manage their lifecycle along with the CSV: Lifecycle for optional objects When the CSV is deleted, OLM deletes the optional object. When the CSV is upgraded: If the name of the optional object is the same, OLM updates it in place. If the name of the optional object has changed between versions, OLM deletes and recreates it. 2.2.1.2. Annotations A bundle also includes an annotations.yaml file in its /metadata directory. This file defines higher level aggregate data that helps describe the format and package information about how the bundle should be added into an index of bundles: Example annotations.yaml annotations: operators.operatorframework.io.bundle.mediatype.v1: "registry+v1" 1 operators.operatorframework.io.bundle.manifests.v1: "manifests/" 2 operators.operatorframework.io.bundle.metadata.v1: "metadata/" 3 operators.operatorframework.io.bundle.package.v1: "test-operator" 4 operators.operatorframework.io.bundle.channels.v1: "beta,stable" 5 operators.operatorframework.io.bundle.channel.default.v1: "stable" 6 1 The media type or format of the Operator bundle. The registry+v1 format means it contains a CSV and its associated Kubernetes objects. 2 The path in the image to the directory that contains the Operator manifests. This label is reserved for future use and currently defaults to manifests/ . The value manifests.v1 implies that the bundle contains Operator manifests. 3 The path in the image to the directory that contains metadata files about the bundle. This label is reserved for future use and currently defaults to metadata/ . The value metadata.v1 implies that this bundle has Operator metadata. 4 The package name of the bundle. 5 The list of channels the bundle is subscribing to when added into an Operator Registry. 6 The default channel an Operator should be subscribed to when installed from a registry. Note In case of a mismatch, the annotations.yaml file is authoritative because the on-cluster Operator Registry that relies on these annotations only has access to this file. 2.2.1.3. Dependencies file The dependencies of an Operator are listed in a dependencies.yaml file in the metadata/ folder of a bundle. This file is optional and currently only used to specify explicit Operator-version dependencies. The dependency list contains a type field for each item to specify what kind of dependency this is. There are two supported types of Operator dependencies: olm.package : This type indicates a dependency for a specific Operator version. The dependency information must include the package name and the version of the package in semver format. For example, you can specify an exact version such as 0.5.2 or a range of versions such as >0.5.1 . olm.gvk : With a gvk type, the author can specify a dependency with group/version/kind (GVK) information, similar to existing CRD and API-based usage in a CSV. 
This is a path to enable Operator authors to consolidate all dependencies, API or explicit versions, to be in the same place. In the following example, dependencies are specified for a Prometheus Operator and etcd CRDs: Example dependencies.yaml file dependencies: - type: olm.package value: packageName: prometheus version: ">0.27.0" - type: olm.gvk value: group: etcd.database.coreos.com kind: EtcdCluster version: v1beta2 Additional resources Operator Lifecycle Manager dependency resolution 2.2.1.4. About the opm CLI The opm CLI tool is provided by the Operator Framework for use with the Operator bundle format. This tool allows you to create and maintain catalogs of Operators from a list of Operator bundles that are similar to software repositories. The result is a container image which can be stored in a container registry and then installed on a cluster. A catalog contains a database of pointers to Operator manifest content that can be queried through an included API that is served when the container image is run. On OpenShift Container Platform, Operator Lifecycle Manager (OLM) can reference the image in a catalog source, defined by a CatalogSource object, which polls the image at regular intervals to enable frequent updates to installed Operators on the cluster. See CLI tools for steps on installing the opm CLI. 2.2.2. File-based catalogs File-based catalogs are the latest iteration of the catalog format in Operator Lifecycle Manager (OLM). It is a plain text-based (JSON or YAML) and declarative config evolution of the earlier SQLite database format, and it is fully backwards compatible. The goal of this format is to enable Operator catalog editing, composability, and extensibility. Note The default Red Hat-provided Operator catalogs for OpenShift Container Platform 4.6 and later are currently still shipped in the SQLite database format. Editing With file-based catalogs, users interacting with the contents of a catalog are able to make direct changes to the format and verify that their changes are valid. Because this format is plain text JSON or YAML, catalog maintainers can easily manipulate catalog metadata by hand or with widely known and supported JSON or YAML tooling, such as the jq CLI. This editability enables the following features and user-defined extensions: Promoting an existing bundle to a new channel Changing the default channel of a package Custom algorithms for adding, updating, and removing upgrade edges Composability File-based catalogs are stored in an arbitrary directory hierarchy, which enables catalog composition. For example, consider two separate file-based catalog directories: catalogA and catalogB . A catalog maintainer can create a new combined catalog by making a new directory catalogC and copying catalogA and catalogB into it. This composability enables decentralized catalogs. The format permits Operator authors to maintain Operator-specific catalogs, and it permits maintainers to trivially build a catalog composed of individual Operator catalogs. File-based catalogs can be composed by combining multiple other catalogs, by extracting subsets of one catalog, or a combination of both of these. Note Duplicate packages and duplicate bundles within a package are not permitted. The opm validate command returns an error if any duplicates are found. Because Operator authors are most familiar with their Operator, its dependencies, and its upgrade compatibility, they are able to maintain their own Operator-specific catalog and have direct control over its contents. 
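Returning to the catalogA and catalogB example above, the composition step can be as simple as the following sketch (the directory names are the hypothetical ones used in this section, and opm validate is run afterwards to catch duplicate packages or bundles):

mkdir catalogC
cp -r catalogA catalogB catalogC/
opm validate catalogC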
With file-based catalogs, Operator authors own the task of building and maintaining their packages in a catalog. Composite catalog maintainers, however, only own the task of curating the packages in their catalog and publishing the catalog to users. Extensibility The file-based catalog specification is a low-level representation of a catalog. While it can be maintained directly in its low-level form, catalog maintainers can build interesting extensions on top that can be used by their own custom tooling to make any number of mutations. For example, a tool could translate a high-level API, such as (mode=semver) , down to the low-level, file-based catalog format for upgrade edges. Or a catalog maintainer might need to customize all of the bundle metadata by adding a new property to bundles that meet a certain criteria. While this extensibility allows for additional official tooling to be developed on top of the low-level APIs for future OpenShift Container Platform releases, the major benefit is that catalog maintainers have this capability as well. 2.2.2.1. Directory structure File-based catalogs can be stored and loaded from directory-based file systems. The opm CLI loads the catalog by walking the root directory and recursing into subdirectories. The CLI attempts to load every file it finds and fails if any errors occur. Non-catalog files can be ignored using .indexignore files, which have the same rules for patterns and precedence as .gitignore files. Example .indexignore file # Ignore everything except non-object .json and .yaml files **/* !*.json !*.yaml **/objects/*.json **/objects/*.yaml Catalog maintainers have the flexibility to choose their desired layout, but it is recommended to store each package's file-based catalog blobs in separate subdirectories. Each individual file can be either JSON or YAML; it is not necessary for every file in a catalog to use the same format. Basic recommended structure catalog ├── packageA │ └── index.yaml ├── packageB │ ├── .indexignore │ ├── index.yaml │ └── objects │ └── packageB.v0.1.0.clusterserviceversion.yaml └── packageC └── index.json This recommended structure has the property that each subdirectory in the directory hierarchy is a self-contained catalog, which makes catalog composition, discovery, and navigation trivial file system operations. The catalog could also be included in a parent catalog by copying it into the parent catalog's root directory. 2.2.2.2. Schemas File-based catalogs use a format, based on the CUE language specification , that can be extended with arbitrary schemas. The following _Meta CUE schema defines the format that all file-based catalog blobs must adhere to: _Meta schema _Meta: { // schema is required and must be a non-empty string schema: string & !="" // package is optional, but if it's defined, it must be a non-empty string package?: string & !="" // properties is optional, but if it's defined, it must be a list of 0 or more properties properties?: [... #Property] } #Property: { // type is required type: string & !="" // value is required, and it must not be null value: !=null } Note No CUE schemas listed in this specification should be considered exhaustive. The opm validate command has additional validations that are difficult or impossible to express concisely in CUE. An Operator Lifecycle Manager (OLM) catalog currently uses three schemas ( olm.package , olm.channel , and olm.bundle ), which correspond to OLM's existing package and bundle concepts. 
Each Operator package in a catalog requires exactly one olm.package blob, at least one olm.channel blob, and one or more olm.bundle blobs. Note All olm.* schemas are reserved for OLM-defined schemas. Custom schemas must use a unique prefix, such as a domain that you own. 2.2.2.2.1. olm.package schema The olm.package schema defines package-level metadata for an Operator. This includes its name, description, default channel, and icon. Example 2.1. olm.package schema #Package: { schema: "olm.package" // Package name name: string & !="" // A description of the package description?: string // The package's default channel defaultChannel: string & !="" // An optional icon icon?: { base64data: string mediatype: string } } 2.2.2.2.2. olm.channel schema The olm.channel schema defines a channel within a package, the bundle entries that are members of the channel, and the upgrade edges for those bundles. A bundle can included as an entry in multiple olm.channel blobs, but it can have only one entry per channel. It is valid for an entry's replaces value to reference another bundle name that cannot be found in this catalog or another catalog. However, all other channel invariants must hold true, such as a channel not having multiple heads. Example 2.2. olm.channel schema #Channel: { schema: "olm.channel" package: string & !="" name: string & !="" entries: [...#ChannelEntry] } #ChannelEntry: { // name is required. It is the name of an `olm.bundle` that // is present in the channel. name: string & !="" // replaces is optional. It is the name of bundle that is replaced // by this entry. It does not have to be present in the entry list. replaces?: string & !="" // skips is optional. It is a list of bundle names that are skipped by // this entry. The skipped bundles do not have to be present in the // entry list. skips?: [...string & !=""] // skipRange is optional. It is the semver range of bundle versions // that are skipped by this entry. skipRange?: string & !="" } 2.2.2.2.3. olm.bundle schema Example 2.3. olm.bundle schema #Bundle: { schema: "olm.bundle" package: string & !="" name: string & !="" image: string & !="" properties: [...#Property] relatedImages?: [...#RelatedImage] } #Property: { // type is required type: string & !="" // value is required, and it must not be null value: !=null } #RelatedImage: { // image is the image reference image: string & !="" // name is an optional descriptive name for an image that // helps identify its purpose in the context of the bundle name?: string & !="" } 2.2.2.3. Properties Properties are arbitrary pieces of metadata that can be attached to file-based catalog schemas. The type field is a string that effectively specifies the semantic and syntactic meaning of the value field. The value can be any arbitrary JSON or YAML. OLM defines a handful of property types, again using the reserved olm.* prefix. 2.2.2.3.1. olm.package property The olm.package property defines the package name and version. This is a required property on bundles, and there must be exactly one of these properties. The packageName field must match the bundle's first-class package field, and the version field must be a valid semantic version. Example 2.4. olm.package property #PropertyPackage: { type: "olm.package" value: { packageName: string & !="" version: string & !="" } } 2.2.2.3.2. olm.gvk property The olm.gvk property defines the group/version/kind (GVK) of a Kubernetes API that is provided by this bundle. 
This property is used by OLM to resolve a bundle with this property as a dependency for other bundles that list the same GVK as a required API. The GVK must adhere to Kubernetes GVK validations. Example 2.5. olm.gvk property #PropertyGVK: { type: "olm.gvk" value: { group: string & !="" version: string & !="" kind: string & !="" } } 2.2.2.3.3. olm.package.required The olm.package.required property defines the package name and version range of another package that this bundle requires. For every required package property a bundle lists, OLM ensures there is an Operator installed on the cluster for the listed package and in the required version range. The versionRange field must be a valid semantic version (semver) range. Example 2.6. olm.package.required property #PropertyPackageRequired: { type: "olm.package.required" value: { packageName: string & !="" versionRange: string & !="" } } 2.2.2.3.4. olm.gvk.required The olm.gvk.required property defines the group/version/kind (GVK) of a Kubernetes API that this bundle requires. For every required GVK property a bundle lists, OLM ensures there is an Operator installed on the cluster that provides it. The GVK must adhere to Kubernetes GVK validations. Example 2.7. olm.gvk.required property #PropertyGVKRequired: { type: "olm.gvk.required" value: { group: string & !="" version: string & !="" kind: string & !="" } } 2.2.2.4. Example catalog With file-based catalogs, catalog maintainers can focus on Operator curation and compatibility. Because Operator authors have already produced Operator-specific catalogs for their Operators, catalog maintainers can build their catalog by rendering each Operator catalog into a subdirectory of the catalog's root directory. There are many possible ways to build a file-based catalog; the following steps outline a simple approach: Maintain a single configuration file for the catalog, containing image references for each Operator in the catalog: Example catalog configuration file name: community-operators repo: quay.io/community-operators/catalog tag: latest references: - name: etcd-operator image: quay.io/etcd-operator/index@sha256:5891b5b522d5df086d0ff0b110fbd9d21bb4fc7163af34d08286a2e846f6be03 - name: prometheus-operator image: quay.io/prometheus-operator/index@sha256:e258d248fda94c63753607f7c4494ee0fcbe92f1a76bfdac795c9d84101eb317 Run a script that parses the configuration file and creates a new catalog from its references: Example script name=USD(yq eval '.name' catalog.yaml) mkdir "USDname" yq eval '.name + "/" + .references[].name' catalog.yaml | xargs mkdir for l in USD(yq e '.name as USDcatalog | .references[] | .image + "|" + USDcatalog + "/" + .name + "/index.yaml"' catalog.yaml); do image=USD(echo USDl | cut -d'|' -f1) file=USD(echo USDl | cut -d'|' -f2) opm render "USDimage" > "USDfile" done opm alpha generate dockerfile "USDname" indexImage=USD(yq eval '.repo + ":" + .tag' catalog.yaml) docker build -t "USDindexImage" -f "USDname.Dockerfile" . docker push "USDindexImage" 2.2.2.5. Guidelines Consider the following guidelines when maintaining file-based catalogs. 2.2.2.5.1. Immutable bundles The general advice with Operator Lifecycle Manager (OLM) is that bundle images and their metadata should be treated as immutable. If a broken bundle has been pushed to a catalog, you must assume that at least one of your users has upgraded to that bundle. 
Based on that assumption, you must release another bundle with an upgrade edge from the broken bundle to ensure users with the broken bundle installed receive an upgrade. OLM will not reinstall an installed bundle if the contents of that bundle are updated in the catalog. However, there are some cases where a change in the catalog metadata is preferred: Channel promotion: If you already released a bundle and later decide that you would like to add it to another channel, you can add an entry for your bundle in another olm.channel blob. New upgrade edges: If you release a new 1.2.z bundle version, for example 1.2.4 , but 1.3.0 is already released, you can update the catalog metadata for 1.3.0 to skip 1.2.4 . 2.2.2.5.2. Source control Catalog metadata should be stored in source control and treated as the source of truth. Updates to catalog images should include the following steps: Update the source-controlled catalog directory with a new commit. Build and push the catalog image. Use a consistent tagging taxonomy, such as :latest or :<target_cluster_version> , so that users can receive updates to a catalog as they become available. 2.2.2.6. CLI usage For instructions about creating file-based catalogs by using the opm CLI, see Managing custom catalogs . For reference documentation about the opm CLI commands related to managing file-based catalogs, see CLI tools . 2.2.2.7. Automation Operator authors and catalog maintainers are encouraged to automate their catalog maintenance with CI/CD workflows. Catalog maintainers can further improve on this by building GitOps automation to accomplish the following tasks: Check that pull request (PR) authors are permitted to make the requested changes, for example by updating their package's image reference. Check that the catalog updates pass the opm validate command. Check that the updated bundle or catalog image references exist, the catalog images run successfully in a cluster, and Operators from that package can be successfully installed. Automatically merge PRs that pass the checks. Automatically rebuild and republish the catalog image. 2.3. Operator Framework glossary of common terms This topic provides a glossary of common terms related to the Operator Framework, including Operator Lifecycle Manager (OLM) and the Operator SDK. 2.3.1. Common Operator Framework terms 2.3.1.1. Bundle In the bundle format, a bundle is a collection of an Operator CSV, manifests, and metadata. Together, they form a unique version of an Operator that can be installed onto the cluster. 2.3.1.2. Bundle image In the bundle format, a bundle image is a container image that is built from Operator manifests and that contains one bundle. Bundle images are stored and distributed by Open Container Initiative (OCI) spec container registries, such as Quay.io or DockerHub. 2.3.1.3. Catalog source A catalog source is a repository of CSVs, CRDs, and packages that define an application. 2.3.1.4. Channel A channel defines a stream of updates for an Operator and is used to roll out updates for subscribers. The head points to the latest version of that channel. For example, a stable channel would have all stable versions of an Operator arranged from the earliest to the latest. An Operator can have several channels, and a subscription binding to a certain channel would only look for updates in that channel. 2.3.1.5. Channel head A channel head refers to the latest known update in a particular channel. 2.3.1.6. 
Cluster service version A cluster service version (CSV) is a YAML manifest created from Operator metadata that assists OLM in running the Operator in a cluster. It is the metadata that accompanies an Operator container image, used to populate user interfaces with information such as its logo, description, and version. It is also a source of technical information that is required to run the Operator, like the RBAC rules it requires and which custom resources (CRs) it manages or depends on. 2.3.1.7. Dependency An Operator may have a dependency on another Operator being present in the cluster. For example, the Vault Operator has a dependency on the etcd Operator for its data persistence layer. OLM resolves dependencies by ensuring that all specified versions of Operators and CRDs are installed on the cluster during the installation phase. This dependency is resolved by finding and installing an Operator in a catalog that satisfies the required CRD API, and is not related to packages or bundles. 2.3.1.8. Index image In the bundle format, an index image refers to an image of a database (a database snapshot) that contains information about Operator bundles including CSVs and CRDs of all versions. This index can host a history of Operators on a cluster and be maintained by adding or removing Operators using the opm CLI tool. 2.3.1.9. Install plan An install plan is a calculated list of resources to be created to automatically install or upgrade a CSV. 2.3.1.10. Operator group An Operator group configures all Operators deployed in the same namespace as the OperatorGroup object to watch for their CR in a list of namespaces or cluster-wide. 2.3.1.11. Package In the bundle format, a package is a directory that encloses all released history of an Operator with each version. A released version of an Operator is described in a CSV manifest alongside the CRDs. 2.3.1.12. Registry A registry is a database that stores bundle images of Operators, each with all of its latest and historical versions in all channels. 2.3.1.13. Subscription A subscription keeps CSVs up to date by tracking a channel in a package. 2.3.1.14. Update graph An update graph links versions of CSVs together, similar to the update graph of any other packaged software. Operators can be installed sequentially, or certain versions can be skipped. The update graph is expected to grow only at the head with newer versions being added. 2.4. Operator Lifecycle Manager (OLM) 2.4.1. Operator Lifecycle Manager concepts and resources This guide provides an overview of the concepts that drive Operator Lifecycle Manager (OLM) in OpenShift Container Platform. 2.4.1.1. What is Operator Lifecycle Manager? Operator Lifecycle Manager (OLM) helps users install, update, and manage the lifecycle of Kubernetes native applications (Operators) and their associated services running across their OpenShift Container Platform clusters. It is part of the Operator Framework , an open source toolkit designed to manage Operators in an effective, automated, and scalable way. Figure 2.2. Operator Lifecycle Manager workflow OLM runs by default in OpenShift Container Platform 4.9, which aids cluster administrators in installing, upgrading, and granting access to Operators running on their cluster. The OpenShift Container Platform web console provides management screens for cluster administrators to install Operators, as well as grant specific projects access to use the catalog of Operators available on the cluster. 
For developers, a self-service experience allows provisioning and configuring instances of databases, monitoring, and big data services without having to be subject matter experts, because the Operator has that knowledge baked into it. 2.4.1.2. OLM resources The following custom resource definitions (CRDs) are defined and managed by Operator Lifecycle Manager (OLM): Table 2.1. CRDs managed by OLM and Catalog Operators Resource Short name Description ClusterServiceVersion (CSV) csv Application metadata. For example: name, version, icon, required resources. CatalogSource catsrc A repository of CSVs, CRDs, and packages that define an application. Subscription sub Keeps CSVs up to date by tracking a channel in a package. InstallPlan ip Calculated list of resources to be created to automatically install or upgrade a CSV. OperatorGroup og Configures all Operators deployed in the same namespace as the OperatorGroup object to watch for their custom resource (CR) in a list of namespaces or cluster-wide. OperatorConditions - Creates a communication channel between OLM and an Operator it manages. Operators can write to the Status.Conditions array to communicate complex states to OLM. 2.4.1.2.1. Cluster service version A cluster service version (CSV) represents a specific version of a running Operator on an OpenShift Container Platform cluster. It is a YAML manifest created from Operator metadata that assists Operator Lifecycle Manager (OLM) in running the Operator in the cluster. OLM requires this metadata about an Operator to ensure that it can be kept running safely on a cluster, and to provide information about how updates should be applied as new versions of the Operator are published. This is similar to packaging software for a traditional operating system; think of the packaging step for OLM as the stage at which you make your rpm , deb , or apk bundle. A CSV includes the metadata that accompanies an Operator container image, used to populate user interfaces with information such as its name, version, description, labels, repository link, and logo. A CSV is also a source of technical information required to run the Operator, such as which custom resources (CRs) it manages or depends on, RBAC rules, cluster requirements, and install strategies. This information tells OLM how to create required resources and set up the Operator as a deployment. 2.4.1.2.2. Catalog source A catalog source represents a store of metadata, typically by referencing an index image stored in a container registry. Operator Lifecycle Manager (OLM) queries catalog sources to discover and install Operators and their dependencies. OperatorHub in the OpenShift Container Platform web console also displays the Operators provided by catalog sources. Tip Cluster administrators can view the full list of Operators provided by an enabled catalog source on a cluster by using the Administration Cluster Settings Configuration OperatorHub page in the web console. The spec of a CatalogSource object indicates how to construct a pod or how to communicate with a service that serves the Operator Registry gRPC API. Example 2.8. 
Example CatalogSource object \ufeffapiVersion: operators.coreos.com/v1alpha1 kind: CatalogSource metadata: generation: 1 name: example-catalog 1 namespace: openshift-marketplace 2 annotations: olm.catalogImageTemplate: 3 "quay.io/example-org/example-catalog:v{kube_major_version}.{kube_minor_version}.{kube_patch_version}" spec: displayName: Example Catalog 4 image: quay.io/example-org/example-catalog:v1 5 priority: -400 6 publisher: Example Org sourceType: grpc 7 updateStrategy: registryPoll: 8 interval: 30m0s status: connectionState: address: example-catalog.openshift-marketplace.svc:50051 lastConnect: 2021-08-26T18:14:31Z lastObservedState: READY 9 latestImageRegistryPoll: 2021-08-26T18:46:25Z 10 registryService: 11 createdAt: 2021-08-26T16:16:37Z port: 50051 protocol: grpc serviceName: example-catalog serviceNamespace: openshift-marketplace 1 Name for the CatalogSource object. This value is also used as part of the name for the related pod that is created in the requested namespace. 2 Namespace to create the catalog in. To make the catalog available cluster-wide in all namespaces, set this value to openshift-marketplace . The default Red Hat-provided catalog sources also use the openshift-marketplace namespace. Otherwise, set the value to a specific namespace to make the Operator only available in that namespace. 3 Optional: To avoid cluster upgrades potentially leaving Operator installations in an unsupported state or without a continued update path, you can enable automatically changing your Operator catalog's index image version as part of cluster upgrades. Set the olm.catalogImageTemplate annotation to your index image name and use one or more of the Kubernetes cluster version variables as shown when constructing the template for the image tag. The annotation overwrites the spec.image field at run time. See the "Image template for custom catalog sources" section for more details. 4 Display name for the catalog in the web console and CLI. 5 Index image for the catalog. Optionally, can be omitted when using the olm.catalogImageTemplate annotation, which sets the pull spec at run time. 6 Weight for the catalog source. OLM uses the weight for prioritization during dependency resolution. A higher weight indicates the catalog is preferred over lower-weighted catalogs. 7 Source types include the following: grpc with an image reference: OLM pulls the image and runs the pod, which is expected to serve a compliant API. grpc with an address field: OLM attempts to contact the gRPC API at the given address. This should not be used in most cases. configmap : OLM parses config map data and runs a pod that can serve the gRPC API over it. 8 Automatically check for new versions at a given interval to stay up-to-date. 9 Last observed state of the catalog connection. For example: READY : A connection is successfully established. CONNECTING : A connection is attempting to establish. TRANSIENT_FAILURE : A temporary problem has occurred while attempting to establish a connection, such as a timeout. The state will eventually switch back to CONNECTING and try again. See States of Connectivity in the gRPC documentation for more details. 10 Latest time the container registry storing the catalog image was polled to ensure the image is up-to-date. 11 Status information for the catalog's Operator Registry service. Referencing the name of a CatalogSource object in a subscription instructs OLM where to search to find a requested Operator: Example 2.9. 
Example Subscription object referencing a catalog source apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: name: example-operator namespace: example-namespace spec: channel: stable name: example-operator source: example-catalog sourceNamespace: openshift-marketplace Additional resources Understanding OperatorHub Red Hat-provided Operator catalogs Adding a catalog source to a cluster Catalog priority Viewing Operator catalog source status by using the CLI 2.4.1.2.2.1. Image template for custom catalog sources Operator compatibility with the underlying cluster can be expressed by a catalog source in various ways. One way, which is used for the default Red Hat-provided catalog sources, is to identify image tags for index images that are specifically created for a particular platform release, for example OpenShift Container Platform 4.9. During a cluster upgrade, the index image tag for the default Red Hat-provided catalog sources are updated automatically by the Cluster Version Operator (CVO) so that Operator Lifecycle Manager (OLM) pulls the updated version of the catalog. For example during an upgrade from OpenShift Container Platform 4.8 to 4.9, the spec.image field in the CatalogSource object for the redhat-operators catalog is updated from: registry.redhat.io/redhat/redhat-operator-index:v4.8 to: registry.redhat.io/redhat/redhat-operator-index:v4.9 However, the CVO does not automatically update image tags for custom catalogs. To ensure users are left with a compatible and supported Operator installation after a cluster upgrade, custom catalogs should also be kept updated to reference an updated index image. Starting in OpenShift Container Platform 4.9, cluster administrators can add the olm.catalogImageTemplate annotation in the CatalogSource object for custom catalogs to an image reference that includes a template. The following Kubernetes version variables are supported for use in the template: kube_major_version kube_minor_version kube_patch_version Note You must specify the Kubernetes cluster version and not an OpenShift Container Platform cluster version, as the latter is not currently available for templating. Provided that you have created and pushed an index image with a tag specifying the updated Kubernetes version, setting this annotation enables the index image versions in custom catalogs to be automatically changed after a cluster upgrade. The annotation value is used to set or update the image reference in the spec.image field of the CatalogSource object. This helps avoid cluster upgrades leaving Operator installations in unsupported states or without a continued update path. Important You must ensure that the index image with the updated tag, in whichever registry it is stored in, is accessible by the cluster at the time of the cluster upgrade. Example 2.10. Example catalog source with an image template apiVersion: operators.coreos.com/v1alpha1 kind: CatalogSource metadata: generation: 1 name: example-catalog namespace: openshift-marketplace annotations: olm.catalogImageTemplate: "quay.io/example-org/example-catalog:v{kube_major_version}.{kube_minor_version}" spec: displayName: Example Catalog image: quay.io/example-org/example-catalog:v1.22 priority: -400 publisher: Example Org Note If the spec.image field and the olm.catalogImageTemplate annotation are both set, the spec.image field is overwritten by the resolved value from the annotation. 
If the annotation does not resolve to a usable pull spec, the catalog source falls back to the set spec.image value. If the spec.image field is not set and the annotation does not resolve to a usable pull spec, OLM stops reconciliation of the catalog source and sets it into a human-readable error condition. For an OpenShift Container Platform 4.9 cluster, which uses Kubernetes 1.22, the olm.catalogImageTemplate annotation in the preceding example resolves to the following image reference: quay.io/example-org/example-catalog:v1.22 For future releases of OpenShift Container Platform, you can create updated index images for your custom catalogs that target the later Kubernetes version that is used by the later OpenShift Container Platform version. With the olm.catalogImageTemplate annotation set before the upgrade, upgrading the cluster to the later OpenShift Container Platform version would then automatically update the catalog's index image as well. 2.4.1.2.3. Subscription A subscription , defined by a Subscription object, represents an intention to install an Operator. It is the custom resource that relates an Operator to a catalog source. Subscriptions describe which channel of an Operator package to subscribe to, and whether to perform updates automatically or manually. If set to automatic, the subscription ensures Operator Lifecycle Manager (OLM) manages and upgrades the Operator to ensure that the latest version is always running in the cluster. Example Subscription object apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: name: example-operator namespace: example-namespace spec: channel: stable name: example-operator source: example-catalog sourceNamespace: openshift-marketplace This Subscription object defines the name and namespace of the Operator, as well as the catalog from which the Operator data can be found. The channel, such as alpha , beta , or stable , helps determine which Operator stream should be installed from the catalog source. The names of channels in a subscription can differ between Operators, but the naming scheme should follow a common convention within a given Operator. For example, channel names might follow a minor release update stream for the application provided by the Operator ( 1.2 , 1.3 ) or a release frequency ( stable , fast ). In addition to being easily visible from the OpenShift Container Platform web console, it is possible to identify when there is a newer version of an Operator available by inspecting the status of the related subscription. The value associated with the currentCSV field is the newest version that is known to OLM, and installedCSV is the version that is installed on the cluster. Additional resources Viewing Operator subscription status by using the CLI 2.4.1.2.4. Install plan An install plan , defined by an InstallPlan object, describes a set of resources that Operator Lifecycle Manager (OLM) creates to install or upgrade to a specific version of an Operator. The version is defined by a cluster service version (CSV). To install an Operator, a cluster administrator, or a user who has been granted Operator installation permissions, must first create a Subscription object. A subscription represents the intent to subscribe to a stream of available versions of an Operator from a catalog source. The subscription then creates an InstallPlan object to facilitate the installation of the resources for the Operator. 
The install plan must then be approved according to one of the following approval strategies: If the subscription's spec.installPlanApproval field is set to Automatic , the install plan is approved automatically. If the subscription's spec.installPlanApproval field is set to Manual , the install plan must be manually approved by a cluster administrator or user with proper permissions. After the install plan is approved, OLM creates the specified resources and installs the Operator in the namespace that is specified by the subscription. Example 2.11. Example InstallPlan object apiVersion: operators.coreos.com/v1alpha1 kind: InstallPlan metadata: name: install-abcde namespace: operators spec: approval: Automatic approved: true clusterServiceVersionNames: - my-operator.v1.0.1 generation: 1 status: ... catalogSources: [] conditions: - lastTransitionTime: '2021-01-01T20:17:27Z' lastUpdateTime: '2021-01-01T20:17:27Z' status: 'True' type: Installed phase: Complete plan: - resolving: my-operator.v1.0.1 resource: group: operators.coreos.com kind: ClusterServiceVersion manifest: >- ... name: my-operator.v1.0.1 sourceName: redhat-operators sourceNamespace: openshift-marketplace version: v1alpha1 status: Created - resolving: my-operator.v1.0.1 resource: group: apiextensions.k8s.io kind: CustomResourceDefinition manifest: >- ... name: webservers.web.servers.org sourceName: redhat-operators sourceNamespace: openshift-marketplace version: v1beta1 status: Created - resolving: my-operator.v1.0.1 resource: group: '' kind: ServiceAccount manifest: >- ... name: my-operator sourceName: redhat-operators sourceNamespace: openshift-marketplace version: v1 status: Created - resolving: my-operator.v1.0.1 resource: group: rbac.authorization.k8s.io kind: Role manifest: >- ... name: my-operator.v1.0.1-my-operator-6d7cbc6f57 sourceName: redhat-operators sourceNamespace: openshift-marketplace version: v1 status: Created - resolving: my-operator.v1.0.1 resource: group: rbac.authorization.k8s.io kind: RoleBinding manifest: >- ... name: my-operator.v1.0.1-my-operator-6d7cbc6f57 sourceName: redhat-operators sourceNamespace: openshift-marketplace version: v1 status: Created ... Additional resources Allowing non-cluster administrators to install Operators 2.4.1.2.5. Operator groups An Operator group , defined by the OperatorGroup resource, provides multitenant configuration to OLM-installed Operators. An Operator group selects target namespaces in which to generate required RBAC access for its member Operators. The set of target namespaces is provided by a comma-delimited string stored in the olm.targetNamespaces annotation of a cluster service version (CSV). This annotation is applied to the CSV instances of member Operators and is projected into their deployments. Additional resources Operator groups . 2.4.1.2.6. Operator conditions As part of its role in managing the lifecycle of an Operator, Operator Lifecycle Manager (OLM) infers the state of an Operator from the state of Kubernetes resources that define the Operator. While this approach provides some level of assurance that an Operator is in a given state, there are many instances where an Operator might need to communicate information to OLM that could not be inferred otherwise. This information can then be used by OLM to better manage the lifecycle of the Operator. OLM provides a custom resource definition (CRD) called OperatorCondition that allows Operators to communicate conditions to OLM. 
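The following is a hypothetical sketch of such a resource, assuming the v2 API version and the Upgradeable condition type; the resource name matches the CSV of the managed Operator, and all other values are illustrative:

apiVersion: operators.coreos.com/v2
kind: OperatorCondition
metadata:
  name: my-operator.v1.0.1
  namespace: operators
spec:
  conditions:
  - type: Upgradeable
    status: "False"
    reason: "MigrationInProgress"
    message: "A data migration is running; the Operator cannot be upgraded yet."
    lastTransitionTime: "2021-01-01T20:17:27Z"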
There are a set of supported conditions that influence management of the Operator by OLM when present in the Spec.Conditions array of an OperatorCondition resource. Note By default, the Spec.Conditions array is not present in an OperatorCondition object until it is either added by a user or as a result of custom Operator logic. Additional resources Operator conditions . 2.4.2. Operator Lifecycle Manager architecture This guide outlines the component architecture of Operator Lifecycle Manager (OLM) in OpenShift Container Platform. 2.4.2.1. Component responsibilities Operator Lifecycle Manager (OLM) is composed of two Operators: the OLM Operator and the Catalog Operator. Each of these Operators is responsible for managing the custom resource definitions (CRDs) that are the basis for the OLM framework: Table 2.2. CRDs managed by OLM and Catalog Operators Resource Short name Owner Description ClusterServiceVersion (CSV) csv OLM Application metadata: name, version, icon, required resources, installation, and so on. InstallPlan ip Catalog Calculated list of resources to be created to automatically install or upgrade a CSV. CatalogSource catsrc Catalog A repository of CSVs, CRDs, and packages that define an application. Subscription sub Catalog Used to keep CSVs up to date by tracking a channel in a package. OperatorGroup og OLM Configures all Operators deployed in the same namespace as the OperatorGroup object to watch for their custom resource (CR) in a list of namespaces or cluster-wide. Each of these Operators is also responsible for creating the following resources: Table 2.3. Resources created by OLM and Catalog Operators Resource Owner Deployments OLM ServiceAccounts (Cluster)Roles (Cluster)RoleBindings CustomResourceDefinitions (CRDs) Catalog ClusterServiceVersions 2.4.2.2. OLM Operator The OLM Operator is responsible for deploying applications defined by CSV resources after the required resources specified in the CSV are present in the cluster. The OLM Operator is not concerned with the creation of the required resources; you can choose to manually create these resources using the CLI or using the Catalog Operator. This separation of concern allows users incremental buy-in in terms of how much of the OLM framework they choose to leverage for their application. The OLM Operator uses the following workflow: Watch for cluster service versions (CSVs) in a namespace and check that requirements are met. If requirements are met, run the install strategy for the CSV. Note A CSV must be an active member of an Operator group for the install strategy to run. 2.4.2.3. Catalog Operator The Catalog Operator is responsible for resolving and installing cluster service versions (CSVs) and the required resources they specify. It is also responsible for watching catalog sources for updates to packages in channels and upgrading them, automatically if desired, to the latest available versions. To track a package in a channel, you can create a Subscription object configuring the desired package, channel, and the CatalogSource object you want to use for pulling updates. When updates are found, an appropriate InstallPlan object is written into the namespace on behalf of the user. The Catalog Operator uses the following workflow: Connect to each catalog source in the cluster. Watch for unresolved install plans created by a user, and if found: Find the CSV matching the name requested and add the CSV as a resolved resource. For each managed or required CRD, add the CRD as a resolved resource. 
For each required CRD, find the CSV that manages it. Watch for resolved install plans and create all of the discovered resources for it, if approved by a user or automatically. Watch for catalog sources and subscriptions and create install plans based on them. 2.4.2.4. Catalog Registry The Catalog Registry stores CSVs and CRDs for creation in a cluster and stores metadata about packages and channels. A package manifest is an entry in the Catalog Registry that associates a package identity with sets of CSVs. Within a package, channels point to a particular CSV. Because CSVs explicitly reference the CSV that they replace, a package manifest provides the Catalog Operator with all of the information that is required to update a CSV to the latest version in a channel, stepping through each intermediate version. 2.4.3. Operator Lifecycle Manager workflow This guide outlines the workflow of Operator Lifecycle Manager (OLM) in OpenShift Container Platform. 2.4.3.1. Operator installation and upgrade workflow in OLM In the Operator Lifecycle Manager (OLM) ecosystem, the following resources are used to resolve Operator installations and upgrades: ClusterServiceVersion (CSV) CatalogSource Subscription Operator metadata, defined in CSVs, can be stored in a collection called a catalog source. OLM uses catalog sources, which use the Operator Registry API , to query for available Operators as well as upgrades for installed Operators. Figure 2.3. Catalog source overview Within a catalog source, Operators are organized into packages and streams of updates called channels , which should be a familiar update pattern from OpenShift Container Platform or other software on a continuous release cycle like web browsers. Figure 2.4. Packages and channels in a Catalog source A user indicates a particular package and channel in a particular catalog source in a subscription , for example an etcd package and its alpha channel. If a subscription is made to a package that has not yet been installed in the namespace, the latest Operator for that package is installed. Note OLM deliberately avoids version comparisons, so the "latest" or "newest" Operator available from a given catalog channel package path does not necessarily need to be the highest version number. It should be thought of more as the head reference of a channel, similar to a Git repository. Each CSV has a replaces parameter that indicates which Operator it replaces. This builds a graph of CSVs that can be queried by OLM, and updates can be shared between channels. Channels can be thought of as entry points into the graph of updates: Figure 2.5. OLM graph of available channel updates Example channels in a package packageName: example channels: - name: alpha currentCSV: example.v0.1.2 - name: beta currentCSV: example.v0.1.3 defaultChannel: alpha For OLM to successfully query for updates, given a catalog source, package, channel, and CSV, a catalog must be able to return, unambiguously and deterministically, a single CSV that replaces the input CSV. 2.4.3.1.1. Example upgrade path For an example upgrade scenario, consider an installed Operator corresponding to CSV version 0.1.1 . OLM queries the catalog source and detects an upgrade in the subscribed channel with new CSV version 0.1.3 that replaces an older but not-installed CSV version 0.1.2 , which in turn replaces the older and installed CSV version 0.1.1 . 
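The replaces chain for this scenario could be sketched with the following CSV fragments; the package name and versions are illustrative, not taken from a real catalog:

# Channel head
metadata:
  name: example.v0.1.3
spec:
  replaces: example.v0.1.2
---
metadata:
  name: example.v0.1.2
spec:
  replaces: example.v0.1.1
---
# Currently installed
metadata:
  name: example.v0.1.1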
OLM walks back from the channel head through the replaces field specified in each CSV to determine the upgrade path 0.1.3 → 0.1.2 → 0.1.1, where the direction of the arrow indicates that the former replaces the latter. OLM upgrades the Operator one version at a time until it reaches the channel head. For this scenario, OLM installs Operator version 0.1.2 to replace the existing Operator version 0.1.1. Then, it installs Operator version 0.1.3 to replace the previously installed Operator version 0.1.2. At this point, the installed Operator version 0.1.3 matches the channel head and the upgrade is complete. 2.4.3.1.2. Skipping upgrades The basic path for upgrades in OLM is: A catalog source is updated with one or more updates to an Operator. OLM traverses every version of the Operator until reaching the latest version the catalog source contains. However, sometimes this is not a safe operation to perform. There are cases where a published version of an Operator should never be installed on a cluster if it has not been already, for example because the version introduces a serious vulnerability. In those cases, OLM must consider two cluster states and provide an update graph that supports both: The "bad" intermediate Operator has been seen by the cluster and installed. The "bad" intermediate Operator has not yet been installed onto the cluster. By shipping a new catalog and adding a skipped release, OLM can always resolve a single unique update regardless of the cluster state and whether it has seen the bad update yet. Example CSV with skipped release apiVersion: operators.coreos.com/v1alpha1 kind: ClusterServiceVersion metadata: name: etcdoperator.v0.9.2 namespace: placeholder annotations: spec: displayName: etcd description: Etcd Operator replaces: etcdoperator.v0.9.0 skips: - etcdoperator.v0.9.1 Consider the following example of Old CatalogSource and New CatalogSource. Figure 2.6. Skipping updates This graph maintains that: Any Operator found in Old CatalogSource has a single replacement in New CatalogSource. Any Operator found in New CatalogSource has a single replacement in New CatalogSource. If the bad update has not yet been installed, it will never be. 2.4.3.1.3. Replacing multiple Operators Creating New CatalogSource as described requires publishing CSVs that replace one Operator, but can skip several. This can be accomplished using the skipRange annotation: olm.skipRange: <semver_range> where <semver_range> has the version range format supported by the semver library. When searching catalogs for updates, if the head of a channel has a skipRange annotation and the currently installed Operator has a version field that falls in the range, OLM updates to the latest entry in the channel. The order of precedence is: Channel head in the source specified by sourceName on the subscription, if the other criteria for skipping are met. The Operator that replaces the current one, in the source specified by sourceName. Channel head in another source that is visible to the subscription, if the other criteria for skipping are met. The Operator that replaces the current one in any source visible to the subscription. Example CSV with skipRange apiVersion: operators.coreos.com/v1alpha1 kind: ClusterServiceVersion metadata: name: elasticsearch-operator.v4.1.2 namespace: <namespace> annotations: olm.skipRange: '>=4.1.0 <4.1.2' 2.4.3.1.4. Z-stream support A z-stream, or patch release, must replace all z-stream releases for the same minor version.
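For illustration, a patch release can combine the replaces field with a skipRange annotation so that it supersedes every earlier z-stream of the same minor version. The Operator name, versions, and range below are hypothetical:

apiVersion: operators.coreos.com/v1alpha1
kind: ClusterServiceVersion
metadata:
  name: example-operator.v4.1.3
  annotations:
    olm.skipRange: '>=4.1.0 <4.1.3'
spec:
  replaces: example-operator.v4.0.5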
OLM does not consider major, minor, or patch versions; it just needs to build the correct graph in a catalog. In other words, OLM must be able to take a graph as in Old CatalogSource and, similar to before, generate a graph as in New CatalogSource: Figure 2.7. Replacing several Operators This graph maintains that: Any Operator found in Old CatalogSource has a single replacement in New CatalogSource. Any Operator found in New CatalogSource has a single replacement in New CatalogSource. Any z-stream release in Old CatalogSource will update to the latest z-stream release in New CatalogSource. Unavailable releases can be considered "virtual" graph nodes: their content does not need to exist; the registry just needs to respond as if the graph looks like this. 2.4.4. Operator Lifecycle Manager dependency resolution This guide outlines dependency resolution and custom resource definition (CRD) upgrade lifecycles with Operator Lifecycle Manager (OLM) in OpenShift Container Platform. 2.4.4.1. About dependency resolution OLM manages the dependency resolution and upgrade lifecycle of running Operators. In many ways, the problems OLM faces are similar to other operating system package managers like yum and rpm. However, there is one constraint that similar systems do not generally have that OLM does: because Operators are always running, OLM attempts to ensure that you are never left with a set of Operators that do not work with each other. This means that OLM must never do the following: Install a set of Operators that require APIs that cannot be provided. Update an Operator in a way that breaks another that depends upon it. 2.4.4.2. Dependencies file The dependencies of an Operator are listed in a dependencies.yaml file in the metadata/ folder of a bundle. This file is optional and currently only used to specify explicit Operator-version dependencies. The dependency list contains a type field for each item to specify what kind of dependency this is. There are two supported types of Operator dependencies: olm.package: This type indicates a dependency for a specific Operator version. The dependency information must include the package name and the version of the package in semver format. For example, you can specify an exact version such as 0.5.2 or a range of versions such as >0.5.1. olm.gvk: With a gvk type, the author can specify a dependency with group/version/kind (GVK) information, similar to existing CRD and API-based usage in a CSV. This gives Operator authors a way to consolidate all dependencies, whether API-based or explicit versions, in the same place. In the following example, dependencies are specified for a Prometheus Operator and etcd CRDs: Example dependencies.yaml file dependencies: - type: olm.package value: packageName: prometheus version: ">0.27.0" - type: olm.gvk value: group: etcd.database.coreos.com kind: EtcdCluster version: v1beta2 2.4.4.3. Dependency preferences There can be many options that equally satisfy a dependency of an Operator. The dependency resolver in Operator Lifecycle Manager (OLM) determines which option best fits the requirements of the requested Operator. As an Operator author or user, it can be important to understand how these choices are made so that dependency resolution is clear. 2.4.4.3.1. Catalog priority On an OpenShift Container Platform cluster, OLM reads catalog sources to know which Operators are available for installation.
Example CatalogSource object apiVersion: "operators.coreos.com/v1alpha1" kind: "CatalogSource" metadata: name: "my-operators" namespace: "operators" spec: sourceType: grpc image: example.com/my/operator-index:v1 displayName: "My Operators" priority: 100 A CatalogSource object has a priority field, which is used by the resolver to know how to prefer options for a dependency. There are two rules that govern catalog preference: Options in higher-priority catalogs are preferred to options in lower-priority catalogs. Options in the same catalog as the dependent are preferred to any other catalogs. 2.4.4.3.2. Channel ordering An Operator package in a catalog is a collection of update channels that a user can subscribe to in an OpenShift Container Platform cluster. Channels can be used to provide a particular stream of updates for a minor release ( 1.2 , 1.3 ) or a release frequency ( stable , fast ). It is likely that a dependency might be satisfied by Operators in the same package, but different channels. For example, version 1.2 of an Operator might exist in both the stable and fast channels. Each package has a default channel, which is always preferred to non-default channels. If no option in the default channel can satisfy a dependency, options are considered from the remaining channels in lexicographic order of the channel name. 2.4.4.3.3. Order within a channel There are almost always multiple options to satisfy a dependency within a single channel. For example, Operators in one package and channel provide the same set of APIs. When a user creates a subscription, they indicate which channel to receive updates from. This immediately reduces the search to just that one channel. But within the channel, it is likely that many Operators satisfy a dependency. Within a channel, newer Operators that are higher up in the update graph are preferred. If the head of a channel satisfies a dependency, it will be tried first. 2.4.4.3.4. Other constraints In addition to the constraints supplied by package dependencies, OLM includes additional constraints to represent the desired user state and enforce resolution invariants. 2.4.4.3.4.1. Subscription constraint A subscription constraint filters the set of Operators that can satisfy a subscription. Subscriptions are user-supplied constraints for the dependency resolver. They declare the intent to either install a new Operator if it is not already on the cluster, or to keep an existing Operator updated. 2.4.4.3.4.2. Package constraint Within a namespace, no two Operators may come from the same package. 2.4.4.4. CRD upgrades OLM upgrades a custom resource definition (CRD) immediately if it is owned by a singular cluster service version (CSV). If a CRD is owned by multiple CSVs, then the CRD is upgraded when it has satisfied all of the following backward compatible conditions: All existing serving versions in the current CRD are present in the new CRD. All existing instances, or custom resources, that are associated with the serving versions of the CRD are valid when validated against the validation schema of the new CRD. Additional resources Adding a new CRD version Deprecating or removing a CRD version 2.4.4.5. Dependency best practices When specifying dependencies, there are best practices you should consider. Depend on APIs or a specific version range of Operators Operators can add or remove APIs at any time; always specify an olm.gvk dependency on any APIs your Operators requires. 
The exception to this is if you are specifying olm.package constraints instead. Set a minimum version The Kubernetes documentation on API changes describes what changes are allowed for Kubernetes-style Operators. These versioning conventions allow an Operator to update an API without bumping the API version, as long as the API is backwards-compatible. For Operator dependencies, this means that knowing the API version of a dependency might not be enough to ensure the dependent Operator works as intended. For example: TestOperator v1.0.0 provides v1alpha1 API version of the MyObject resource. TestOperator v1.0.1 adds a new field spec.newfield to MyObject , but still at v1alpha1. Your Operator might require the ability to write spec.newfield into the MyObject resource. An olm.gvk constraint alone is not enough for OLM to determine that you need TestOperator v1.0.1 and not TestOperator v1.0.0. Whenever possible, if a specific Operator that provides an API is known ahead of time, specify an additional olm.package constraint to set a minimum. Omit a maximum version or allow a very wide range Because Operators provide cluster-scoped resources such as API services and CRDs, an Operator that specifies a small window for a dependency might unnecessarily constrain updates for other consumers of that dependency. Whenever possible, do not set a maximum version. Alternatively, set a very wide semantic range to prevent conflicts with other Operators. For example, >1.0.0 <2.0.0 . Unlike with conventional package managers, Operator authors explicitly encode that updates are safe through channels in OLM. If an update is available for an existing subscription, it is assumed that the Operator author is indicating that it can update from the version. Setting a maximum version for a dependency overrides the update stream of the author by unnecessarily truncating it at a particular upper bound. Note Cluster administrators cannot override dependencies set by an Operator author. However, maximum versions can and should be set if there are known incompatibilities that must be avoided. Specific versions can be omitted with the version range syntax, for example > 1.0.0 !1.2.1 . Additional resources Kubernetes documentation: Changing the API 2.4.4.6. Dependency caveats When specifying dependencies, there are caveats you should consider. No compound constraints (AND) There is currently no method for specifying an AND relationship between constraints. In other words, there is no way to specify that one Operator depends on another Operator that both provides a given API and has version >1.1.0 . This means that when specifying a dependency such as: dependencies: - type: olm.package value: packageName: etcd version: ">3.1.0" - type: olm.gvk value: group: etcd.database.coreos.com kind: EtcdCluster version: v1beta2 It would be possible for OLM to satisfy this with two Operators: one that provides EtcdCluster and one that has version >3.1.0 . Whether that happens, or whether an Operator is selected that satisfies both constraints, depends on the ordering that potential options are visited. Dependency preferences and ordering options are well-defined and can be reasoned about, but to exercise caution, Operators should stick to one mechanism or the other. Cross-namespace compatibility OLM performs dependency resolution at the namespace scope. It is possible to get into an update deadlock if updating an Operator in one namespace would be an issue for an Operator in another namespace, and vice-versa. 2.4.4.7. 
Example dependency resolution scenarios In the following examples, a provider is an Operator which "owns" a CRD or API service. Example: Deprecating dependent APIs A and B are APIs (CRDs): The provider of A depends on B. The provider of B has a subscription. The provider of B updates to provide C but deprecates B. This results in: B no longer has a provider. A no longer works. This is a case OLM prevents with its upgrade strategy. Example: Version deadlock A and B are APIs: The provider of A requires B. The provider of B requires A. The provider of A updates to (provide A2, require B2) and deprecate A. The provider of B updates to (provide B2, require A2) and deprecate B. If OLM attempts to update A without simultaneously updating B, or vice-versa, it is unable to progress to new versions of the Operators, even though a new compatible set can be found. This is another case OLM prevents with its upgrade strategy. 2.4.5. Operator groups This guide outlines the use of Operator groups with Operator Lifecycle Manager (OLM) in OpenShift Container Platform. 2.4.5.1. About Operator groups An Operator group , defined by the OperatorGroup resource, provides multitenant configuration to OLM-installed Operators. An Operator group selects target namespaces in which to generate required RBAC access for its member Operators. The set of target namespaces is provided by a comma-delimited string stored in the olm.targetNamespaces annotation of a cluster service version (CSV). This annotation is applied to the CSV instances of member Operators and is projected into their deployments. 2.4.5.2. Operator group membership An Operator is considered a member of an Operator group if the following conditions are true: The CSV of the Operator exists in the same namespace as the Operator group. The install modes in the CSV of the Operator support the set of namespaces targeted by the Operator group. An install mode in a CSV consists of an InstallModeType field and a boolean Supported field. The spec of a CSV can contain a set of install modes of four distinct InstallModeTypes : Table 2.4. Install modes and supported Operator groups InstallModeType Description OwnNamespace The Operator can be a member of an Operator group that selects its own namespace. SingleNamespace The Operator can be a member of an Operator group that selects one namespace. MultiNamespace The Operator can be a member of an Operator group that selects more than one namespace. AllNamespaces The Operator can be a member of an Operator group that selects all namespaces (target namespace set is the empty string "" ). Note If the spec of a CSV omits an entry of InstallModeType , then that type is considered unsupported unless support can be inferred by an existing entry that implicitly supports it. 2.4.5.3. 
Target namespace selection You can explicitly name the target namespace for an Operator group using the spec.targetNamespaces parameter: apiVersion: operators.coreos.com/v1 kind: OperatorGroup metadata: name: my-group namespace: my-namespace spec: targetNamespaces: - my-namespace You can alternatively specify a namespace using a label selector with the spec.selector parameter: apiVersion: operators.coreos.com/v1 kind: OperatorGroup metadata: name: my-group namespace: my-namespace spec: selector: cool.io/prod: "true" Important Listing multiple namespaces via spec.targetNamespaces or using a label selector via spec.selector is not recommended, because support for more than one target namespace in an Operator group will likely be removed in a future release. If both spec.targetNamespaces and spec.selector are defined, spec.selector is ignored. Alternatively, you can omit both spec.selector and spec.targetNamespaces to specify a global Operator group, which selects all namespaces: apiVersion: operators.coreos.com/v1 kind: OperatorGroup metadata: name: my-group namespace: my-namespace The resolved set of selected namespaces is shown in the status.namespaces parameter of an Operator group. The status.namespaces parameter of a global Operator group contains the empty string ( "" ), which signals to a consuming Operator that it should watch all namespaces. 2.4.5.4. Operator group CSV annotations Member CSVs of an Operator group have the following annotations: Annotation Description olm.operatorGroup=<group_name> Contains the name of the Operator group. olm.operatorNamespace=<group_namespace> Contains the namespace of the Operator group. olm.targetNamespaces=<target_namespaces> Contains a comma-delimited string that lists the target namespace selection of the Operator group. Note All annotations except olm.targetNamespaces are included with copied CSVs. Omitting the olm.targetNamespaces annotation on copied CSVs prevents the duplication of target namespaces between tenants. 2.4.5.5. Provided APIs annotation A group/version/kind (GVK) is a unique identifier for a Kubernetes API. Information about which GVKs are provided by an Operator group is shown in the olm.providedAPIs annotation. The value of the annotation is a string consisting of <kind>.<version>.<group> values delimited with commas. The GVKs of CRDs and API services provided by all active member CSVs of an Operator group are included. Review the following example of an OperatorGroup object with a single active member CSV that provides the PackageManifest resource: apiVersion: operators.coreos.com/v1 kind: OperatorGroup metadata: annotations: olm.providedAPIs: PackageManifest.v1alpha1.packages.apps.redhat.com name: olm-operators namespace: local ... spec: selector: {} serviceAccount: metadata: creationTimestamp: null targetNamespaces: - local status: lastUpdated: 2019-02-19T16:18:28Z namespaces: - local 2.4.5.6. Role-based access control When an Operator group is created, three cluster roles are generated.
Each contains a single aggregation rule with a cluster role selector set to match a label, as shown below: Cluster role Label to match <operatorgroup_name>-admin olm.opgroup.permissions/aggregate-to-admin: <operatorgroup_name> <operatorgroup_name>-edit olm.opgroup.permissions/aggregate-to-edit: <operatorgroup_name> <operatorgroup_name>-view olm.opgroup.permissions/aggregate-to-view: <operatorgroup_name> The following RBAC resources are generated when a CSV becomes an active member of an Operator group, as long as the CSV is watching all namespaces with the AllNamespaces install mode and is not in a failed state with reason InterOperatorGroupOwnerConflict : Cluster roles for each API resource from a CRD Cluster roles for each API resource from an API service Additional roles and role bindings Table 2.5. Cluster roles generated for each API resource from a CRD Cluster role Settings <kind>.<group>-<version>-admin Verbs on <kind> : * Aggregation labels: rbac.authorization.k8s.io/aggregate-to-admin: true olm.opgroup.permissions/aggregate-to-admin: <operatorgroup_name> <kind>.<group>-<version>-edit Verbs on <kind> : create update patch delete Aggregation labels: rbac.authorization.k8s.io/aggregate-to-edit: true olm.opgroup.permissions/aggregate-to-edit: <operatorgroup_name> <kind>.<group>-<version>-view Verbs on <kind> : get list watch Aggregation labels: rbac.authorization.k8s.io/aggregate-to-view: true olm.opgroup.permissions/aggregate-to-view: <operatorgroup_name> <kind>.<group>-<version>-view-crdview Verbs on apiextensions.k8s.io customresourcedefinitions <crd-name> : get Aggregation labels: rbac.authorization.k8s.io/aggregate-to-view: true olm.opgroup.permissions/aggregate-to-view: <operatorgroup_name> Table 2.6. Cluster roles generated for each API resource from an API service Cluster role Settings <kind>.<group>-<version>-admin Verbs on <kind> : * Aggregation labels: rbac.authorization.k8s.io/aggregate-to-admin: true olm.opgroup.permissions/aggregate-to-admin: <operatorgroup_name> <kind>.<group>-<version>-edit Verbs on <kind> : create update patch delete Aggregation labels: rbac.authorization.k8s.io/aggregate-to-edit: true olm.opgroup.permissions/aggregate-to-edit: <operatorgroup_name> <kind>.<group>-<version>-view Verbs on <kind> : get list watch Aggregation labels: rbac.authorization.k8s.io/aggregate-to-view: true olm.opgroup.permissions/aggregate-to-view: <operatorgroup_name> Additional roles and role bindings If the CSV defines exactly one target namespace that contains * , then a cluster role and corresponding cluster role binding are generated for each permission defined in the permissions field of the CSV. All resources generated are given the olm.owner: <csv_name> and olm.owner.namespace: <csv_namespace> labels. If the CSV does not define exactly one target namespace that contains * , then all roles and role bindings in the Operator namespace with the olm.owner: <csv_name> and olm.owner.namespace: <csv_namespace> labels are copied into the target namespace. 2.4.5.7. Copied CSVs OLM creates copies of all active member CSVs of an Operator group in each of the target namespaces of that Operator group. The purpose of a copied CSV is to tell users of a target namespace that a specific Operator is configured to watch resources created there. Copied CSVs have a status reason Copied and are updated to match the status of their source CSV. The olm.targetNamespaces annotation is stripped from copied CSVs before they are created on the cluster. 
Omitting the target namespace selection avoids the duplication of target namespaces between tenants. Copied CSVs are deleted when their source CSV no longer exists or the Operator group that their source CSV belongs to no longer targets the namespace of the copied CSV. 2.4.5.8. Static Operator groups An Operator group is static if its spec.staticProvidedAPIs field is set to true . As a result, OLM does not modify the olm.providedAPIs annotation of an Operator group, which means that it can be set in advance. This is useful when a user wants to use an Operator group to prevent resource contention in a set of namespaces but does not have active member CSVs that provide the APIs for those resources. Below is an example of an Operator group that protects Prometheus resources in all namespaces with the something.cool.io/cluster-monitoring: "true" annotation: apiVersion: operators.coreos.com/v1 kind: OperatorGroup metadata: name: cluster-monitoring namespace: cluster-monitoring annotations: olm.providedAPIs: Alertmanager.v1.monitoring.coreos.com,Prometheus.v1.monitoring.coreos.com,PrometheusRule.v1.monitoring.coreos.com,ServiceMonitor.v1.monitoring.coreos.com spec: staticProvidedAPIs: true selector: matchLabels: something.cool.io/cluster-monitoring: "true" 2.4.5.9. Operator group intersection Two Operator groups are said to have intersecting provided APIs if the intersection of their target namespace sets is not an empty set and the intersection of their provided API sets, defined by olm.providedAPIs annotations, is not an empty set. A potential issue is that Operator groups with intersecting provided APIs can compete for the same resources in the set of intersecting namespaces. Note When checking intersection rules, an Operator group namespace is always included as part of its selected target namespaces. Rules for intersection Each time an active member CSV synchronizes, OLM queries the cluster for the set of intersecting provided APIs between the Operator group of the CSV and all others. OLM then checks if that set is an empty set: If true and the CSV's provided APIs are a subset of the Operator group's: Continue transitioning. If true and the CSV's provided APIs are not a subset of the Operator group's: If the Operator group is static: Clean up any deployments that belong to the CSV. Transition the CSV to a failed state with status reason CannotModifyStaticOperatorGroupProvidedAPIs . If the Operator group is not static: Replace the Operator group's olm.providedAPIs annotation with the union of itself and the CSV's provided APIs. If false and the CSV's provided APIs are not a subset of the Operator group's: Clean up any deployments that belong to the CSV. Transition the CSV to a failed state with status reason InterOperatorGroupOwnerConflict . If false and the CSV's provided APIs are a subset of the Operator group's: If the Operator group is static: Clean up any deployments that belong to the CSV. Transition the CSV to a failed state with status reason CannotModifyStaticOperatorGroupProvidedAPIs . If the Operator group is not static: Replace the Operator group's olm.providedAPIs annotation with the difference between itself and the CSV's provided APIs. Note Failure states caused by Operator groups are non-terminal. The following actions are performed each time an Operator group synchronizes: The set of provided APIs from active member CSVs is calculated from the cluster. Note that copied CSVs are ignored. 
The cluster set is compared to olm.providedAPIs , and if olm.providedAPIs contains any extra APIs, then those APIs are pruned. All CSVs that provide the same APIs across all namespaces are requeued. This notifies conflicting CSVs in intersecting groups that their conflict has possibly been resolved, either through resizing or through deletion of the conflicting CSV. 2.4.5.10. Limitations for multi-tenant Operator management OpenShift Container Platform provides limited support for simultaneously installing different variations of an Operator on a cluster. Operators are control plane extensions. All tenants, or namespaces, share the same control plane of a cluster. Therefore, tenants in a multi-tenant environment also have to share Operators. The Operator Lifecycle Manager (OLM) installs Operators multiple times in different namespaces. One constraint of this is that the Operator's API versions must be the same. Different major versions of an Operator often have incompatible custom resource definitions (CRDs). This makes it difficult to quickly verify OLMs. 2.4.5.10.1. Additional resources Allowing non-cluster administrators to install Operators 2.4.5.11. Troubleshooting Operator groups Membership An install plan's namespace must contain only one Operator group. When attempting to generate a cluster service version (CSV) in a namespace, an install plan considers an Operator group invalid in the following scenarios: No Operator groups exist in the install plan's namespace. Multiple Operator groups exist in the install plan's namespace. An incorrect or non-existent service account name is specified in the Operator group. If an install plan encounters an invalid Operator group, the CSV is not generated and the InstallPlan resource continues to install with a relevant message. For example, the following message is provided if more than one Operator group exists in the same namespace: attenuated service account query failed - more than one operator group(s) are managing this namespace count=2 where count= specifies the number of Operator groups in the namespace. If the install modes of a CSV do not support the target namespace selection of the Operator group in its namespace, the CSV transitions to a failure state with the reason UnsupportedOperatorGroup . CSVs in a failed state for this reason transition to pending after either the target namespace selection of the Operator group changes to a supported configuration, or the install modes of the CSV are modified to support the target namespace selection. 2.4.6. Operator conditions This guide outlines how Operator Lifecycle Manager (OLM) uses Operator conditions. 2.4.6.1. About Operator conditions As part of its role in managing the lifecycle of an Operator, Operator Lifecycle Manager (OLM) infers the state of an Operator from the state of Kubernetes resources that define the Operator. While this approach provides some level of assurance that an Operator is in a given state, there are many instances where an Operator might need to communicate information to OLM that could not be inferred otherwise. This information can then be used by OLM to better manage the lifecycle of the Operator. OLM provides a custom resource definition (CRD) called OperatorCondition that allows Operators to communicate conditions to OLM. There are a set of supported conditions that influence management of the Operator by OLM when present in the Spec.Conditions array of an OperatorCondition resource. 
Note By default, the Spec.Conditions array is not present in an OperatorCondition object until it is either added by a user or as a result of custom Operator logic. 2.4.6.2. Supported conditions Operator Lifecycle Manager (OLM) supports the following Operator conditions. 2.4.6.2.1. Upgradeable condition The Upgradeable Operator condition prevents an existing cluster service version (CSV) from being replaced by a newer version of the CSV. This condition is useful when: An Operator is about to start a critical process and should not be upgraded until the process is completed. An Operator is performing a migration of custom resources (CRs) that must be completed before the Operator is ready to be upgraded. Example Upgradeable Operator condition apiVersion: operators.coreos.com/v1 kind: OperatorCondition metadata: name: my-operator namespace: operators spec: conditions: - type: Upgradeable 1 status: "False" 2 reason: "migration" message: "The Operator is performing a migration." lastTransitionTime: "2020-08-24T23:15:55Z" 1 Name of the condition. 2 A False value indicates the Operator is not ready to be upgraded. OLM prevents a CSV that replaces the existing CSV of the Operator from leaving the Pending phase. 2.4.6.3. Additional resources Managing Operator conditions Enabling Operator conditions 2.4.7. Operator Lifecycle Manager metrics 2.4.7.1. Exposed metrics Operator Lifecycle Manager (OLM) exposes certain OLM-specific resources for use by the Prometheus-based OpenShift Container Platform cluster monitoring stack. Table 2.7. Metrics exposed by OLM Name Description catalog_source_count Number of catalog sources. catalogsource_ready State of a catalog source. The value 1 indicates that the catalog source is in a READY state. The value of 0 indicates that the catalog source is not in a READY state. csv_abnormal When reconciling a cluster service version (CSV), present whenever a CSV version is in any state other than Succeeded , for example when it is not installed. Includes the name , namespace , phase , reason , and version labels. A Prometheus alert is created when this metric is present. csv_count Number of CSVs successfully registered. csv_succeeded When reconciling a CSV, represents whether a CSV version is in a Succeeded state (value 1 ) or not (value 0 ). Includes the name , namespace , and version labels. csv_upgrade_count Monotonic count of CSV upgrades. install_plan_count Number of install plans. installplan_warnings_total Monotonic count of warnings generated by resources, such as deprecated resources, included in an install plan. olm_resolution_duration_seconds The duration of a dependency resolution attempt. subscription_count Number of subscriptions. subscription_sync_total Monotonic count of subscription syncs. Includes the channel , installed CSV, and subscription name labels. 2.4.8. Webhook management in Operator Lifecycle Manager Webhooks allow Operator authors to intercept, modify, and accept or reject resources before they are saved to the object store and handled by the Operator controller. Operator Lifecycle Manager (OLM) can manage the lifecycle of these webhooks when they are shipped alongside your Operator. See Defining cluster service versions (CSVs) for details on how an Operator developer can define webhooks for their Operator, as well as considerations when running on OLM. 2.4.8.1. Additional resources Types of webhook admission plugins Kubernetes documentation: Validating admission webhooks Mutating admission webhooks Conversion webhooks 2.5. 
Understanding OperatorHub 2.5.1. About OperatorHub OperatorHub is the web console interface in OpenShift Container Platform that cluster administrators use to discover and install Operators. With one click, an Operator can be pulled from its off-cluster source, installed and subscribed on the cluster, and made ready for engineering teams to self-service manage the product across deployment environments using Operator Lifecycle Manager (OLM). Cluster administrators can choose from catalogs grouped into the following categories: Category Description Red Hat Operators Red Hat products packaged and shipped by Red Hat. Supported by Red Hat. Certified Operators Products from leading independent software vendors (ISVs). Red Hat partners with ISVs to package and ship. Supported by the ISV. Red Hat Marketplace Certified software that can be purchased from Red Hat Marketplace . Community Operators Optionally-visible software maintained by relevant representatives in the redhat-openshift-ecosystem/community-operators-prod/operators GitHub repository. No official support. Custom Operators Operators you add to the cluster yourself. If you have not added any custom Operators, the Custom category does not appear in the web console on your OperatorHub. Operators on OperatorHub are packaged to run on OLM. This includes a YAML file called a cluster service version (CSV) containing all of the CRDs, RBAC rules, deployments, and container images required to install and securely run the Operator. It also contains user-visible information like a description of its features and supported Kubernetes versions. The Operator SDK can be used to assist developers packaging their Operators for use on OLM and OperatorHub. If you have a commercial application that you want to make accessible to your customers, get it included using the certification workflow provided on the Red Hat Partner Connect portal at connect.redhat.com . 2.5.2. OperatorHub architecture The OperatorHub UI component is driven by the Marketplace Operator by default on OpenShift Container Platform in the openshift-marketplace namespace. 2.5.2.1. OperatorHub custom resource The Marketplace Operator manages an OperatorHub custom resource (CR) named cluster that manages the default CatalogSource objects provided with OperatorHub. You can modify this resource to enable or disable the default catalogs, which is useful when configuring OpenShift Container Platform in restricted network environments. Example OperatorHub custom resource apiVersion: config.openshift.io/v1 kind: OperatorHub metadata: name: cluster spec: disableAllDefaultSources: true 1 sources: [ 2 { name: "community-operators", disabled: false } ] 1 disableAllDefaultSources is an override that controls availability of all default catalogs that are configured by default during an OpenShift Container Platform installation. 2 Disable default catalogs individually by changing the disabled parameter value per source. 2.5.3. Additional resources Catalog source About the Operator SDK Defining cluster service versions (CSVs) Operator installation and upgrade workflow in OLM Red Hat Partner Connect Red Hat Marketplace 2.6. Red Hat-provided Operator catalogs 2.6.1. About Operator catalogs An Operator catalog is a repository of metadata that Operator Lifecycle Manager (OLM) can query to discover and install Operators and their dependencies on a cluster. OLM always installs Operators from the latest version of a catalog. 
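As a quick illustration, you can list the catalog sources available on a cluster and the packages they serve. The output below is abbreviated and hypothetical:

$ oc get catalogsources -n openshift-marketplace

NAME                  DISPLAY               TYPE   PUBLISHER   AGE
community-operators   Community Operators   grpc   Red Hat     45d
redhat-operators      Red Hat Operators     grpc   Red Hat     45d

$ oc get packagemanifests -n openshift-marketplace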
As of OpenShift Container Platform 4.6, Red Hat-provided catalogs are distributed using index images . An index image, based on the Operator bundle format, is a containerized snapshot of a catalog. It is an immutable artifact that contains the database of pointers to a set of Operator manifest content. A catalog can reference an index image to source its content for OLM on the cluster. As catalogs are updated, the latest versions of Operators change, and older versions may be removed or altered. In addition, when OLM runs on an OpenShift Container Platform cluster in a restricted network environment, it is unable to access the catalogs directly from the internet to pull the latest content. As a cluster administrator, you can create your own custom index image, either based on a Red Hat-provided catalog or from scratch, which can be used to source the catalog content on the cluster. Creating and updating your own index image provides a method for customizing the set of Operators available on the cluster, while also avoiding the aforementioned restricted network environment issues. Important Kubernetes periodically deprecates certain APIs that are removed in subsequent releases. As a result, Operators are unable to use removed APIs starting with the version of OpenShift Container Platform that uses the Kubernetes version that removed the API. If your cluster is using custom catalogs, see Controlling Operator compatibility with OpenShift Container Platform versions for more details about how Operator authors can update their projects to help avoid workload issues and prevent incompatible upgrades. Note Support for the legacy package manifest format for Operators, including custom catalogs that were using the legacy format, is removed in OpenShift Container Platform 4.8 and later. When creating custom catalog images, versions of OpenShift Container Platform 4 required using the oc adm catalog build command, which was deprecated for several releases and is now removed. With the availability of Red Hat-provided index images starting in OpenShift Container Platform 4.6, catalog builders must use the opm index command to manage index images. Additional resources Managing custom catalogs Using Operator Lifecycle Manager on restricted networks 2.6.2. About Red Hat-provided Operator catalogs The Red Hat-provided catalog sources are installed by default in the openshift-marketplace namespace, which makes the catalogs available cluster-wide in all namespaces. The following Operator catalogs are distributed by Red Hat: Catalog Index image Description redhat-operators registry.redhat.io/redhat/redhat-operator-index:v4.9 Red Hat products packaged and shipped by Red Hat. Supported by Red Hat. certified-operators registry.redhat.io/redhat/certified-operator-index:v4.9 Products from leading independent software vendors (ISVs). Red Hat partners with ISVs to package and ship. Supported by the ISV. redhat-marketplace registry.redhat.io/redhat/redhat-marketplace-index:v4.9 Certified software that can be purchased from Red Hat Marketplace . community-operators registry.redhat.io/redhat/community-operator-index:v4.9 Software maintained by relevant representatives in the redhat-openshift-ecosystem/community-operators-prod/operators GitHub repository. No official support. During a cluster upgrade, the index image tag for the default Red Hat-provided catalog sources are updated automatically by the Cluster Version Operator (CVO) so that Operator Lifecycle Manager (OLM) pulls the updated version of the catalog. 
For example during an upgrade from OpenShift Container Platform 4.8 to 4.9, the spec.image field in the CatalogSource object for the redhat-operators catalog is updated from: registry.redhat.io/redhat/redhat-operator-index:v4.8 to: registry.redhat.io/redhat/redhat-operator-index:v4.9 2.7. CRDs 2.7.1. Extending the Kubernetes API with custom resource definitions Operators use the Kubernetes extension mechanism, custom resource definitions (CRDs), so that custom objects managed by the Operator look and act just like the built-in, native Kubernetes objects. This guide describes how cluster administrators can extend their OpenShift Container Platform cluster by creating and managing CRDs. 2.7.1.1. Custom resource definitions In the Kubernetes API, a resource is an endpoint that stores a collection of API objects of a certain kind. For example, the built-in Pods resource contains a collection of Pod objects. A custom resource definition (CRD) object defines a new, unique object type, called a kind , in the cluster and lets the Kubernetes API server handle its entire lifecycle. Custom resource (CR) objects are created from CRDs that have been added to the cluster by a cluster administrator, allowing all cluster users to add the new resource type into projects. When a cluster administrator adds a new CRD to the cluster, the Kubernetes API server reacts by creating a new RESTful resource path that can be accessed by the entire cluster or a single project (namespace) and begins serving the specified CR. Cluster administrators that want to grant access to the CRD to other users can use cluster role aggregation to grant access to users with the admin , edit , or view default cluster roles. Cluster role aggregation allows the insertion of custom policy rules into these cluster roles. This behavior integrates the new resource into the RBAC policy of the cluster as if it was a built-in resource. Operators in particular make use of CRDs by packaging them with any required RBAC policy and other software-specific logic. Cluster administrators can also add CRDs manually to the cluster outside of the lifecycle of an Operator, making them available to all users. Note While only cluster administrators can create CRDs, developers can create the CR from an existing CRD if they have read and write permission to it. 2.7.1.2. Creating a custom resource definition To create custom resource (CR) objects, cluster administrators must first create a custom resource definition (CRD). Prerequisites Access to an OpenShift Container Platform cluster with cluster-admin user privileges. Procedure To create a CRD: Create a YAML file that contains the following field types: Example YAML file for a CRD apiVersion: apiextensions.k8s.io/v1 1 kind: CustomResourceDefinition metadata: name: crontabs.stable.example.com 2 spec: group: stable.example.com 3 versions: name: v1 4 scope: Namespaced 5 names: plural: crontabs 6 singular: crontab 7 kind: CronTab 8 shortNames: - ct 9 1 Use the apiextensions.k8s.io/v1 API. 2 Specify a name for the definition. This must be in the <plural-name>.<group> format using the values from the group and plural fields. 3 Specify a group name for the API. An API group is a collection of objects that are logically related. For example, all batch objects like Job or ScheduledJob could be in the batch API group (such as batch.api.example.com ). A good practice is to use a fully-qualified-domain name (FQDN) of your organization. 4 Specify a version name to be used in the URL. 
Each API group can exist in multiple versions, for example v1alpha , v1beta , v1 . 5 Specify whether the custom objects are available to a project ( Namespaced ) or all projects in the cluster ( Cluster ). 6 Specify the plural name to use in the URL. The plural field is the same as a resource in an API URL. 7 Specify a singular name to use as an alias on the CLI and for display. 8 Specify the kind of objects that can be created. The type can be in CamelCase. 9 Specify a shorter string to match your resource on the CLI. Note By default, a CRD is cluster-scoped and available to all projects. Create the CRD object: USD oc create -f <file_name>.yaml A new RESTful API endpoint is created at: /apis/<spec:group>/<spec:version>/<scope>/*/<names-plural>/... For example, using the example file, the following endpoint is created: /apis/stable.example.com/v1/namespaces/*/crontabs/... You can now use this endpoint URL to create and manage CRs. The object kind is based on the spec.kind field of the CRD object you created. 2.7.1.3. Creating cluster roles for custom resource definitions Cluster administrators can grant permissions to existing cluster-scoped custom resource definitions (CRDs). If you use the admin , edit , and view default cluster roles, you can take advantage of cluster role aggregation for their rules. Important You must explicitly assign permissions to each of these roles. The roles with more permissions do not inherit rules from roles with fewer permissions. If you assign a rule to a role, you must also assign that verb to roles that have more permissions. For example, if you grant the get crontabs permission to the view role, you must also grant it to the edit and admin roles. The admin or edit role is usually assigned to the user that created a project through the project template. Prerequisites Create a CRD. Procedure Create a cluster role definition file for the CRD. The cluster role definition is a YAML file that contains the rules that apply to each cluster role. An OpenShift Container Platform controller adds the rules that you specify to the default cluster roles. Example YAML file for a cluster role definition kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 1 metadata: name: aggregate-cron-tabs-admin-edit 2 labels: rbac.authorization.k8s.io/aggregate-to-admin: "true" 3 rbac.authorization.k8s.io/aggregate-to-edit: "true" 4 rules: - apiGroups: ["stable.example.com"] 5 resources: ["crontabs"] 6 verbs: ["get", "list", "watch", "create", "update", "patch", "delete", "deletecollection"] 7 --- kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: name: aggregate-cron-tabs-view 8 labels: # Add these permissions to the "view" default role. rbac.authorization.k8s.io/aggregate-to-view: "true" 9 rbac.authorization.k8s.io/aggregate-to-cluster-reader: "true" 10 rules: - apiGroups: ["stable.example.com"] 11 resources: ["crontabs"] 12 verbs: ["get", "list", "watch"] 13 1 Use the rbac.authorization.k8s.io/v1 API. 2 8 Specify a name for the definition. 3 Specify this label to grant permissions to the admin default role. 4 Specify this label to grant permissions to the edit default role. 5 11 Specify the group name of the CRD. 6 12 Specify the plural name of the CRD that these rules apply to. 7 13 Specify the verbs that represent the permissions that are granted to the role. For example, apply read and write permissions to the admin and edit roles and only read permission to the view role. 9 Specify this label to grant permissions to the view default role. 
10 Specify this label to grant permissions to the cluster-reader default role. Create the cluster role: USD oc create -f <file_name>.yaml 2.7.1.4. Creating custom resources from a file After a custom resource definitions (CRD) has been added to the cluster, custom resources (CRs) can be created with the CLI from a file using the CR specification. Prerequisites CRD added to the cluster by a cluster administrator. Procedure Create a YAML file for the CR. In the following example definition, the cronSpec and image custom fields are set in a CR of Kind: CronTab . The Kind comes from the spec.kind field of the CRD object: Example YAML file for a CR apiVersion: "stable.example.com/v1" 1 kind: CronTab 2 metadata: name: my-new-cron-object 3 finalizers: 4 - finalizer.stable.example.com spec: 5 cronSpec: "* * * * /5" image: my-awesome-cron-image 1 Specify the group name and API version (name/version) from the CRD. 2 Specify the type in the CRD. 3 Specify a name for the object. 4 Specify the finalizers for the object, if any. Finalizers allow controllers to implement conditions that must be completed before the object can be deleted. 5 Specify conditions specific to the type of object. After you create the file, create the object: USD oc create -f <file_name>.yaml 2.7.1.5. Inspecting custom resources You can inspect custom resource (CR) objects that exist in your cluster using the CLI. Prerequisites A CR object exists in a namespace to which you have access. Procedure To get information on a specific kind of a CR, run: USD oc get <kind> For example: USD oc get crontab Example output NAME KIND my-new-cron-object CronTab.v1.stable.example.com Resource names are not case-sensitive, and you can use either the singular or plural forms defined in the CRD, as well as any short name. For example: USD oc get crontabs USD oc get crontab USD oc get ct You can also view the raw YAML data for a CR: USD oc get <kind> -o yaml For example: USD oc get ct -o yaml Example output apiVersion: v1 items: - apiVersion: stable.example.com/v1 kind: CronTab metadata: clusterName: "" creationTimestamp: 2017-05-31T12:56:35Z deletionGracePeriodSeconds: null deletionTimestamp: null name: my-new-cron-object namespace: default resourceVersion: "285" selfLink: /apis/stable.example.com/v1/namespaces/default/crontabs/my-new-cron-object uid: 9423255b-4600-11e7-af6a-28d2447dc82b spec: cronSpec: '* * * * /5' 1 image: my-awesome-cron-image 2 1 2 Custom data from the YAML that you used to create the object displays. 2.7.2. Managing resources from custom resource definitions This guide describes how developers can manage custom resources (CRs) that come from custom resource definitions (CRDs). 2.7.2.1. Custom resource definitions In the Kubernetes API, a resource is an endpoint that stores a collection of API objects of a certain kind. For example, the built-in Pods resource contains a collection of Pod objects. A custom resource definition (CRD) object defines a new, unique object type, called a kind , in the cluster and lets the Kubernetes API server handle its entire lifecycle. Custom resource (CR) objects are created from CRDs that have been added to the cluster by a cluster administrator, allowing all cluster users to add the new resource type into projects. Operators in particular make use of CRDs by packaging them with any required RBAC policy and other software-specific logic. Cluster administrators can also add CRDs manually to the cluster outside of the lifecycle of an Operator, making them available to all users. 
Note While only cluster administrators can create CRDs, developers can create the CR from an existing CRD if they have read and write permission to it. 2.7.2.2. Creating custom resources from a file After a custom resource definitions (CRD) has been added to the cluster, custom resources (CRs) can be created with the CLI from a file using the CR specification. Prerequisites CRD added to the cluster by a cluster administrator. Procedure Create a YAML file for the CR. In the following example definition, the cronSpec and image custom fields are set in a CR of Kind: CronTab . The Kind comes from the spec.kind field of the CRD object: Example YAML file for a CR apiVersion: "stable.example.com/v1" 1 kind: CronTab 2 metadata: name: my-new-cron-object 3 finalizers: 4 - finalizer.stable.example.com spec: 5 cronSpec: "* * * * /5" image: my-awesome-cron-image 1 Specify the group name and API version (name/version) from the CRD. 2 Specify the type in the CRD. 3 Specify a name for the object. 4 Specify the finalizers for the object, if any. Finalizers allow controllers to implement conditions that must be completed before the object can be deleted. 5 Specify conditions specific to the type of object. After you create the file, create the object: USD oc create -f <file_name>.yaml 2.7.2.3. Inspecting custom resources You can inspect custom resource (CR) objects that exist in your cluster using the CLI. Prerequisites A CR object exists in a namespace to which you have access. Procedure To get information on a specific kind of a CR, run: USD oc get <kind> For example: USD oc get crontab Example output NAME KIND my-new-cron-object CronTab.v1.stable.example.com Resource names are not case-sensitive, and you can use either the singular or plural forms defined in the CRD, as well as any short name. For example: USD oc get crontabs USD oc get crontab USD oc get ct You can also view the raw YAML data for a CR: USD oc get <kind> -o yaml For example: USD oc get ct -o yaml Example output apiVersion: v1 items: - apiVersion: stable.example.com/v1 kind: CronTab metadata: clusterName: "" creationTimestamp: 2017-05-31T12:56:35Z deletionGracePeriodSeconds: null deletionTimestamp: null name: my-new-cron-object namespace: default resourceVersion: "285" selfLink: /apis/stable.example.com/v1/namespaces/default/crontabs/my-new-cron-object uid: 9423255b-4600-11e7-af6a-28d2447dc82b spec: cronSpec: '* * * * /5' 1 image: my-awesome-cron-image 2 1 2 Custom data from the YAML that you used to create the object displays.
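When you are done with an object, you can delete it with the same CLI. A minimal sketch, reusing the CronTab example names from above (note that if the CR carries a finalizer, as in the example, deletion only completes after the controlling controller removes that finalizer):
$ oc delete crontab my-new-cron-object
You can also query the RESTful endpoint directly to confirm which objects the API server stores for the custom resource, for example:
$ oc get --raw /apis/stable.example.com/v1/namespaces/default/crontabs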
[ "etcd ├── manifests │ ├── etcdcluster.crd.yaml │ └── etcdoperator.clusterserviceversion.yaml │ └── secret.yaml │ └── configmap.yaml └── metadata └── annotations.yaml └── dependencies.yaml", "annotations: operators.operatorframework.io.bundle.mediatype.v1: \"registry+v1\" 1 operators.operatorframework.io.bundle.manifests.v1: \"manifests/\" 2 operators.operatorframework.io.bundle.metadata.v1: \"metadata/\" 3 operators.operatorframework.io.bundle.package.v1: \"test-operator\" 4 operators.operatorframework.io.bundle.channels.v1: \"beta,stable\" 5 operators.operatorframework.io.bundle.channel.default.v1: \"stable\" 6", "dependencies: - type: olm.package value: packageName: prometheus version: \">0.27.0\" - type: olm.gvk value: group: etcd.database.coreos.com kind: EtcdCluster version: v1beta2", "Ignore everything except non-object .json and .yaml files **/* !*.json !*.yaml **/objects/*.json **/objects/*.yaml", "catalog ├── packageA │ └── index.yaml ├── packageB │ ├── .indexignore │ ├── index.yaml │ └── objects │ └── packageB.v0.1.0.clusterserviceversion.yaml └── packageC └── index.json", "_Meta: { // schema is required and must be a non-empty string schema: string & !=\"\" // package is optional, but if it's defined, it must be a non-empty string package?: string & !=\"\" // properties is optional, but if it's defined, it must be a list of 0 or more properties properties?: [... #Property] } #Property: { // type is required type: string & !=\"\" // value is required, and it must not be null value: !=null }", "#Package: { schema: \"olm.package\" // Package name name: string & !=\"\" // A description of the package description?: string // The package's default channel defaultChannel: string & !=\"\" // An optional icon icon?: { base64data: string mediatype: string } }", "#Channel: { schema: \"olm.channel\" package: string & !=\"\" name: string & !=\"\" entries: [...#ChannelEntry] } #ChannelEntry: { // name is required. It is the name of an `olm.bundle` that // is present in the channel. name: string & !=\"\" // replaces is optional. It is the name of bundle that is replaced // by this entry. It does not have to be present in the entry list. replaces?: string & !=\"\" // skips is optional. It is a list of bundle names that are skipped by // this entry. The skipped bundles do not have to be present in the // entry list. skips?: [...string & !=\"\"] // skipRange is optional. It is the semver range of bundle versions // that are skipped by this entry. 
skipRange?: string & !=\"\" }", "#Bundle: { schema: \"olm.bundle\" package: string & !=\"\" name: string & !=\"\" image: string & !=\"\" properties: [...#Property] relatedImages?: [...#RelatedImage] } #Property: { // type is required type: string & !=\"\" // value is required, and it must not be null value: !=null } #RelatedImage: { // image is the image reference image: string & !=\"\" // name is an optional descriptive name for an image that // helps identify its purpose in the context of the bundle name?: string & !=\"\" }", "#PropertyPackage: { type: \"olm.package\" value: { packageName: string & !=\"\" version: string & !=\"\" } }", "#PropertyGVK: { type: \"olm.gvk\" value: { group: string & !=\"\" version: string & !=\"\" kind: string & !=\"\" } }", "#PropertyPackageRequired: { type: \"olm.package.required\" value: { packageName: string & !=\"\" versionRange: string & !=\"\" } }", "#PropertyGVKRequired: { type: \"olm.gvk.required\" value: { group: string & !=\"\" version: string & !=\"\" kind: string & !=\"\" } }", "name: community-operators repo: quay.io/community-operators/catalog tag: latest references: - name: etcd-operator image: quay.io/etcd-operator/index@sha256:5891b5b522d5df086d0ff0b110fbd9d21bb4fc7163af34d08286a2e846f6be03 - name: prometheus-operator image: quay.io/prometheus-operator/index@sha256:e258d248fda94c63753607f7c4494ee0fcbe92f1a76bfdac795c9d84101eb317", "name=USD(yq eval '.name' catalog.yaml) mkdir \"USDname\" yq eval '.name + \"/\" + .references[].name' catalog.yaml | xargs mkdir for l in USD(yq e '.name as USDcatalog | .references[] | .image + \"|\" + USDcatalog + \"/\" + .name + \"/index.yaml\"' catalog.yaml); do image=USD(echo USDl | cut -d'|' -f1) file=USD(echo USDl | cut -d'|' -f2) opm render \"USDimage\" > \"USDfile\" done opm alpha generate dockerfile \"USDname\" indexImage=USD(yq eval '.repo + \":\" + .tag' catalog.yaml) docker build -t \"USDindexImage\" -f \"USDname.Dockerfile\" . 
docker push \"USDindexImage\"", "\\ufeffapiVersion: operators.coreos.com/v1alpha1 kind: CatalogSource metadata: generation: 1 name: example-catalog 1 namespace: openshift-marketplace 2 annotations: olm.catalogImageTemplate: 3 \"quay.io/example-org/example-catalog:v{kube_major_version}.{kube_minor_version}.{kube_patch_version}\" spec: displayName: Example Catalog 4 image: quay.io/example-org/example-catalog:v1 5 priority: -400 6 publisher: Example Org sourceType: grpc 7 updateStrategy: registryPoll: 8 interval: 30m0s status: connectionState: address: example-catalog.openshift-marketplace.svc:50051 lastConnect: 2021-08-26T18:14:31Z lastObservedState: READY 9 latestImageRegistryPoll: 2021-08-26T18:46:25Z 10 registryService: 11 createdAt: 2021-08-26T16:16:37Z port: 50051 protocol: grpc serviceName: example-catalog serviceNamespace: openshift-marketplace", "apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: name: example-operator namespace: example-namespace spec: channel: stable name: example-operator source: example-catalog sourceNamespace: openshift-marketplace", "registry.redhat.io/redhat/redhat-operator-index:v4.8", "registry.redhat.io/redhat/redhat-operator-index:v4.9", "apiVersion: operators.coreos.com/v1alpha1 kind: CatalogSource metadata: generation: 1 name: example-catalog namespace: openshift-marketplace annotations: olm.catalogImageTemplate: \"quay.io/example-org/example-catalog:v{kube_major_version}.{kube_minor_version}\" spec: displayName: Example Catalog image: quay.io/example-org/example-catalog:v1.22 priority: -400 publisher: Example Org", "quay.io/example-org/example-catalog:v1.22", "apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: name: example-operator namespace: example-namespace spec: channel: stable name: example-operator source: example-catalog sourceNamespace: openshift-marketplace", "apiVersion: operators.coreos.com/v1alpha1 kind: InstallPlan metadata: name: install-abcde namespace: operators spec: approval: Automatic approved: true clusterServiceVersionNames: - my-operator.v1.0.1 generation: 1 status: catalogSources: [] conditions: - lastTransitionTime: '2021-01-01T20:17:27Z' lastUpdateTime: '2021-01-01T20:17:27Z' status: 'True' type: Installed phase: Complete plan: - resolving: my-operator.v1.0.1 resource: group: operators.coreos.com kind: ClusterServiceVersion manifest: >- name: my-operator.v1.0.1 sourceName: redhat-operators sourceNamespace: openshift-marketplace version: v1alpha1 status: Created - resolving: my-operator.v1.0.1 resource: group: apiextensions.k8s.io kind: CustomResourceDefinition manifest: >- name: webservers.web.servers.org sourceName: redhat-operators sourceNamespace: openshift-marketplace version: v1beta1 status: Created - resolving: my-operator.v1.0.1 resource: group: '' kind: ServiceAccount manifest: >- name: my-operator sourceName: redhat-operators sourceNamespace: openshift-marketplace version: v1 status: Created - resolving: my-operator.v1.0.1 resource: group: rbac.authorization.k8s.io kind: Role manifest: >- name: my-operator.v1.0.1-my-operator-6d7cbc6f57 sourceName: redhat-operators sourceNamespace: openshift-marketplace version: v1 status: Created - resolving: my-operator.v1.0.1 resource: group: rbac.authorization.k8s.io kind: RoleBinding manifest: >- name: my-operator.v1.0.1-my-operator-6d7cbc6f57 sourceName: redhat-operators sourceNamespace: openshift-marketplace version: v1 status: Created", "packageName: example channels: - name: alpha currentCSV: example.v0.1.2 - name: beta currentCSV: 
example.v0.1.3 defaultChannel: alpha", "apiVersion: operators.coreos.com/v1alpha1 kind: ClusterServiceVersion metadata: name: etcdoperator.v0.9.2 namespace: placeholder annotations: spec: displayName: etcd description: Etcd Operator replaces: etcdoperator.v0.9.0 skips: - etcdoperator.v0.9.1", "olm.skipRange: <semver_range>", "apiVersion: operators.coreos.com/v1alpha1 kind: ClusterServiceVersion metadata: name: elasticsearch-operator.v4.1.2 namespace: <namespace> annotations: olm.skipRange: '>=4.1.0 <4.1.2'", "dependencies: - type: olm.package value: packageName: prometheus version: \">0.27.0\" - type: olm.gvk value: group: etcd.database.coreos.com kind: EtcdCluster version: v1beta2", "apiVersion: \"operators.coreos.com/v1alpha1\" kind: \"CatalogSource\" metadata: name: \"my-operators\" namespace: \"operators\" spec: sourceType: grpc image: example.com/my/operator-index:v1 displayName: \"My Operators\" priority: 100", "dependencies: - type: olm.package value: packageName: etcd version: \">3.1.0\" - type: olm.gvk value: group: etcd.database.coreos.com kind: EtcdCluster version: v1beta2", "apiVersion: operators.coreos.com/v1 kind: OperatorGroup metadata: name: my-group namespace: my-namespace spec: targetNamespaces: - my-namespace", "apiVersion: operators.coreos.com/v1 kind: OperatorGroup metadata: name: my-group namespace: my-namespace spec: selector: cool.io/prod: \"true\"", "apiVersion: operators.coreos.com/v1 kind: OperatorGroup metadata: name: my-group namespace: my-namespace", "apiVersion: operators.coreos.com/v1 kind: OperatorGroup metadata: annotations: olm.providedAPIs: PackageManifest.v1alpha1.packages.apps.redhat.com name: olm-operators namespace: local spec: selector: {} serviceAccount: metadata: creationTimestamp: null targetNamespaces: - local status: lastUpdated: 2019-02-19T16:18:28Z namespaces: - local", "apiVersion: operators.coreos.com/v1 kind: OperatorGroup metadata: name: cluster-monitoring namespace: cluster-monitoring annotations: olm.providedAPIs: Alertmanager.v1.monitoring.coreos.com,Prometheus.v1.monitoring.coreos.com,PrometheusRule.v1.monitoring.coreos.com,ServiceMonitor.v1.monitoring.coreos.com spec: staticProvidedAPIs: true selector: matchLabels: something.cool.io/cluster-monitoring: \"true\"", "attenuated service account query failed - more than one operator group(s) are managing this namespace count=2", "apiVersion: operators.coreos.com/v1 kind: OperatorCondition metadata: name: my-operator namespace: operators spec: conditions: - type: Upgradeable 1 status: \"False\" 2 reason: \"migration\" message: \"The Operator is performing a migration.\" lastTransitionTime: \"2020-08-24T23:15:55Z\"", "apiVersion: config.openshift.io/v1 kind: OperatorHub metadata: name: cluster spec: disableAllDefaultSources: true 1 sources: [ 2 { name: \"community-operators\", disabled: false } ]", "registry.redhat.io/redhat/redhat-operator-index:v4.8", "registry.redhat.io/redhat/redhat-operator-index:v4.9", "apiVersion: apiextensions.k8s.io/v1 1 kind: CustomResourceDefinition metadata: name: crontabs.stable.example.com 2 spec: group: stable.example.com 3 versions: name: v1 4 scope: Namespaced 5 names: plural: crontabs 6 singular: crontab 7 kind: CronTab 8 shortNames: - ct 9", "oc create -f <file_name>.yaml", "/apis/<spec:group>/<spec:version>/<scope>/*/<names-plural>/", "/apis/stable.example.com/v1/namespaces/*/crontabs/", "kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 1 metadata: name: aggregate-cron-tabs-admin-edit 2 labels: rbac.authorization.k8s.io/aggregate-to-admin: 
\"true\" 3 rbac.authorization.k8s.io/aggregate-to-edit: \"true\" 4 rules: - apiGroups: [\"stable.example.com\"] 5 resources: [\"crontabs\"] 6 verbs: [\"get\", \"list\", \"watch\", \"create\", \"update\", \"patch\", \"delete\", \"deletecollection\"] 7 --- kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: name: aggregate-cron-tabs-view 8 labels: # Add these permissions to the \"view\" default role. rbac.authorization.k8s.io/aggregate-to-view: \"true\" 9 rbac.authorization.k8s.io/aggregate-to-cluster-reader: \"true\" 10 rules: - apiGroups: [\"stable.example.com\"] 11 resources: [\"crontabs\"] 12 verbs: [\"get\", \"list\", \"watch\"] 13", "oc create -f <file_name>.yaml", "apiVersion: \"stable.example.com/v1\" 1 kind: CronTab 2 metadata: name: my-new-cron-object 3 finalizers: 4 - finalizer.stable.example.com spec: 5 cronSpec: \"* * * * /5\" image: my-awesome-cron-image", "oc create -f <file_name>.yaml", "oc get <kind>", "oc get crontab", "NAME KIND my-new-cron-object CronTab.v1.stable.example.com", "oc get crontabs", "oc get crontab", "oc get ct", "oc get <kind> -o yaml", "oc get ct -o yaml", "apiVersion: v1 items: - apiVersion: stable.example.com/v1 kind: CronTab metadata: clusterName: \"\" creationTimestamp: 2017-05-31T12:56:35Z deletionGracePeriodSeconds: null deletionTimestamp: null name: my-new-cron-object namespace: default resourceVersion: \"285\" selfLink: /apis/stable.example.com/v1/namespaces/default/crontabs/my-new-cron-object uid: 9423255b-4600-11e7-af6a-28d2447dc82b spec: cronSpec: '* * * * /5' 1 image: my-awesome-cron-image 2", "apiVersion: \"stable.example.com/v1\" 1 kind: CronTab 2 metadata: name: my-new-cron-object 3 finalizers: 4 - finalizer.stable.example.com spec: 5 cronSpec: \"* * * * /5\" image: my-awesome-cron-image", "oc create -f <file_name>.yaml", "oc get <kind>", "oc get crontab", "NAME KIND my-new-cron-object CronTab.v1.stable.example.com", "oc get crontabs", "oc get crontab", "oc get ct", "oc get <kind> -o yaml", "oc get ct -o yaml", "apiVersion: v1 items: - apiVersion: stable.example.com/v1 kind: CronTab metadata: clusterName: \"\" creationTimestamp: 2017-05-31T12:56:35Z deletionGracePeriodSeconds: null deletionTimestamp: null name: my-new-cron-object namespace: default resourceVersion: \"285\" selfLink: /apis/stable.example.com/v1/namespaces/default/crontabs/my-new-cron-object uid: 9423255b-4600-11e7-af6a-28d2447dc82b spec: cronSpec: '* * * * /5' 1 image: my-awesome-cron-image 2" ]
https://docs.redhat.com/en/documentation/openshift_container_platform/4.9/html/operators/understanding-operators
4.176. mesa
4.176. mesa 4.176.1. RHBA-2011:1616 - mesa bug fix and enhancement update Updated mesa packages that fix multiple bugs and add various enhancements are now available for Red Hat Enterprise Linux 6. Mesa provides a 3D graphics application programming interface (API) that is compatible with OpenGL (Open Graphics Library). It also provides hardware-accelerated drivers for many popular graphics chips. The mesa packages have been upgraded to upstream version 7.11, which provides a number of bug fixes and enhancements over the previous version. (BZ# 713772 ) Bug Fixes BZ# 677470 Prior to this update, the OpenGL output was corrupted due to problems with the rendering in guests. This update modifies the software rendering so that the OpenGL output is no longer corrupted. BZ# 745686 Prior to this update, the nouveau gallium driver was wrongly included in the mesa-dri-drivers package, which could lead to conflicts. This update corrects this error and removes the nouveau gallium driver from the package. All Mesa users are advised to upgrade to these updated packages, which fix these bugs and add these enhancements.
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/6.2_technical_notes/mesa
Chapter 8. Internationalization
Chapter 8. Internationalization 8.1. Input Methods The default input framework for the GNOME Desktop in Red Hat Enterprise Linux 7 is IBus (Intelligent Input Bus). It integrates with GNOME 3 and includes a user interface for input method selection. 8.1.1. Configuring and Switching Input Methods Users can use the Region & Language panel in the GNOME Settings to configure their input methods. More information on using input methods can be found in GNOME Help. To access it, press the Super key to enter the Activities Overview , type help , and then press Enter . For non-GNOME sessions, IBus can configure both XKB layouts and input methods in the ibus-setup tool and switch them with a shortcut. The default shortcut to switch input sources is Super + Space . In Red Hat Enterprise Linux 6, the shortcut was Ctrl + Space . 8.1.2. Predictive Input Method for IBus ibus-typing-booster is a predictive input method for the IBus platform. It predicts complete words based on partial input, allowing for faster and more accurate text input. Users can select the required word from a list of suggestions. ibus-typing-booster can also use Hunspell dictionaries to make suggestions for a language. 8.1.3. IBus in the GNOME Desktop Replaces im-chooser Because IBus is now integrated with the GNOME Desktop, im-chooser is deprecated except for using non-IBus input methods.
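For scripted setups outside the Settings panel, the same input sources can be inspected and set with gsettings. The following is a minimal sketch only: the schema org.gnome.desktop.input-sources is the standard GNOME key for input sources, but the engine name typing-booster and the chosen layout are example values you should adapt to your system:
$ gsettings get org.gnome.desktop.input-sources sources
$ gsettings set org.gnome.desktop.input-sources sources "[('xkb', 'us'), ('ibus', 'typing-booster')]"
After setting the key, the configured sources appear in the input source switcher and can be cycled with the shortcut described above.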
https://docs.redhat.com/en/documentation/Red_Hat_Enterprise_Linux/7/html/desktop_migration_and_administration_guide/internationalization
Chapter 8. Executing your content with automation content navigator
Chapter 8. Executing your content with automation content navigator Now that you have your automation execution environments built, you can use automation content navigator to validate that the content will run in the same manner as the automation controller will run it. 8.1. Running Ansible playbooks with automation content navigator As a content creator, you can execute your Ansible playbooks with automation content navigator and interactively delve into the results of each play and task to verify or troubleshoot the playbook. You can also execute your Ansible playbooks inside an execution environment and without an execution environment to compare and troubleshoot any problems. 8.1.1. Executing a playbook from automation content navigator You can run Ansible playbooks with the automation content navigator text-based user interface to follow the execution of the tasks and delve into the results of each task. Prerequisites A playbook. A valid inventory file if not using localhost or an inventory plugin. Procedure Start automation content navigator USD ansible-navigator Run the playbook. USD :run Optional: type ansible-navigator run simple-playbook.yml -i inventory.yml to run the playbook. Verify or add the inventory and any other command line parameters. INVENTORY OR PLAYBOOK NOT FOUND, PLEASE CONFIRM THE FOLLOWING ───────────────────────────────────────────────────────────────────────── Path to playbook: /home/ansible-navigator_demo/simple_playbook.yml Inventory source: /home/ansible-navigator-demo/inventory.yml Additional command line parameters: Please provide a value (optional) ────────────────────────────────────────────────────────────────────────── Submit Cancel Tab to Submit and hit Enter. You should see the tasks executing. Type the number to a play to step into the play results, or type :<number> for numbers above 9. Notice failed tasks show up in red if you have colors enabled for automation content navigator. Type the number to a task to review the task results, or type :<number> for numbers above 9. Optional: type :doc bring up the documentation for the module or plugin used in the task to aid in troubleshooting. ANSIBLE.BUILTIN.PACKAGE_FACTS (MODULE) 0│--- 1│doc: 2│ author: 3│ - Matthew Jones (@matburt) 4│ - Brian Coca (@bcoca) 5│ - Adam Miller (@maxamillion) 6│ collection: ansible.builtin 7│ description: 8│ - Return information about installed packages as facts. <... output omitted ...> 11│ module: package_facts 12│ notes: 13│ - Supports C(check_mode). 14│ options: 15│ manager: 16│ choices: 17│ - auto 18│ - rpm 19│ - apt 20│ - portage 21│ - pkg 22│ - pacman <... output truncated ...> Additional resources ansible-playbook Ansible playbooks 8.1.2. Reviewing playbook results with an automation content navigator artifact file Automation content navigator saves the results of the playbook run in a JSON artifact file. You can use this file to share the playbook results with someone else, save it for security or compliance reasons, or review and troubleshoot later. You only need the artifact file to review the playbook run. You do not need access to the playbook itself or inventory access. Prerequisites A automation content navigator artifact JSON file from a playbook run. Procedure Start automation content navigator with the artifact file. USD ansible-navigator replay simple_playbook_artifact.json Review the playbook results that match when the playbook ran. 
You can now type the number to the plays and tasks to step into each to review the results, as you would after executing the playbook. Additional resources ansible-playbook Ansible playbooks
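The prompts in the example session refer to a playbook and an inventory file on disk. The following is a minimal sketch of what those two files could contain; simple_playbook.yml and inventory.yml are the placeholder names used in the example, and the single task simply gathers package facts with the module whose documentation is shown above:
# simple_playbook.yml
- name: Gather package facts
  hosts: all
  tasks:
    - name: Return information about installed packages
      ansible.builtin.package_facts:
        manager: auto
# inventory.yml
all:
  hosts:
    localhost:
      ansible_connection: local
With these files in place, ansible-navigator run simple_playbook.yml -i inventory.yml starts the run directly without the interactive prompt.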
[ "ansible-navigator", ":run", "INVENTORY OR PLAYBOOK NOT FOUND, PLEASE CONFIRM THE FOLLOWING ───────────────────────────────────────────────────────────────────────── Path to playbook: /home/ansible-navigator_demo/simple_playbook.yml Inventory source: /home/ansible-navigator-demo/inventory.yml Additional command line parameters: Please provide a value (optional) ────────────────────────────────────────────────────────────────────────── Submit Cancel", "ANSIBLE.BUILTIN.PACKAGE_FACTS (MODULE) 0│--- 1│doc: 2│ author: 3│ - Matthew Jones (@matburt) 4│ - Brian Coca (@bcoca) 5│ - Adam Miller (@maxamillion) 6│ collection: ansible.builtin 7│ description: 8│ - Return information about installed packages as facts. <... output omitted ...> 11│ module: package_facts 12│ notes: 13│ - Supports C(check_mode). 14│ options: 15│ manager: 16│ choices: 17│ - auto 18│ - rpm 19│ - apt 20│ - portage 21│ - pkg 22│ - pacman <... output truncated ...>", "ansible-navigator replay simple_playbook_artifact.json" ]
https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.4/html/red_hat_ansible_automation_platform_creator_guide/executing-content-navigator
Chapter 2. About the Migration Toolkit for Runtimes
Chapter 2. About the Migration Toolkit for Runtimes What is the Migration Toolkit for Runtimes? The Migration Toolkit for Runtimes (MTR) is an extensible and customizable rule-based tool that simplifies the migration and modernization of Java applications. MTR examines application artifacts, including project source directories and application archives, and then produces an HTML report highlighting areas needing changes. MTR supports many migration paths, including the following examples: Upgrading to the latest release of Red Hat JBoss Enterprise Application Platform Migrating from Oracle WebLogic or IBM WebSphere Application Server to Red Hat JBoss Enterprise Application Platform Containerizing applications and making them cloud-ready Migrating from Java Spring Boot to Quarkus Updating from Oracle JDK to OpenJDK Upgrading from OpenJDK 8 to OpenJDK 11 Upgrading from OpenJDK 11 to OpenJDK 17 Upgrading from OpenJDK 17 to OpenJDK 21 Migrating EAP Java applications to Azure Migrating Spring Boot Java applications to Azure For more information about use cases and migration paths, see the MTR for developers web page. How does the Migration Toolkit for Runtimes simplify migration? The Migration Toolkit for Runtimes looks for common resources and known trouble spots when migrating applications. It provides a high-level view of the technologies used by the application. MTR generates a detailed report evaluating a migration or modernization path. This report can help you to estimate the effort required for large-scale projects and to reduce the work involved. 2.1. MTR Features The Migration Toolkit for Runtimes (MTR) provides a number of capabilities to assist with planning and executing migration projects. Planning and work estimation MTR assists project managers by detailing the type of work and estimation of effort to complete the tasks. Level of effort is represented in MTR reports as story points. Actual estimates will be based on the skills required and the classification of migration work needed. Identifying migration issues and providing solutions MTR identifies migration issues and highlights specific points in the code where an issue occurs. MTR suggests code changes and provides additional resources to help engineers resolve the specific issue. Detailed reporting MTR produces numerous reports to give both high-level views of the migration effort and details of specific migration tasks. You can view migration issues across all applications, charts and summary information about issues in an application, a breakdown of issues by module in the application, reports of technologies used, and dependencies on other applications and services. You can also examine source files to see the line of code where an issue occurs. See the CLI Guide for more information on the available MTR reports. Built-in rules and migration paths MTR comes with a core set of rules to provide migration assistance for several common migration paths. These rules identify the use of proprietary functionality from other application servers or deprecated subsystems from versions of JBoss EAP. MTR also contains rules to identify common migration issues, such as hard-coded IP addresses and JNDI lookups. Rule extensibility and customization MTR provides the ability to create powerful and complex rules. You can expand upon the core set of rules provided by MTR and create rules to identify additional issues that are important for your migration project. You can also override core rules and create custom rule categories. 
See the Rule Development Guide for more information on customizing MTR rules. Ability to analyze source code or application archives MTR can evaluate application archives or source code, and can evaluate multiple applications together. It can identify archives that are shared across multiple applications, which can help with more accurate effort estimation. 2.2. The MTR rules The Migration Toolkit for Runtimes (MTR) contains rule-based migration tools (analyzers) that you can use to analyze the application programming interfaces (APIs), technologies, and architectures used by the applications you plan to migrate. MTR analyzer rules use the following rule pattern: You can use the MTR rules internally to perform the following tasks: Extract files from archives. Decompile files. Scan and classify file types. Analyze XML and other file content. Analyze the application code. Build the reports. MTR builds a data model based on the rule execution results and stores component data and relationships in a graph database. This database can then be queried and updated as required by the migration rules and for reporting purposes. Note You can create your own custom analyzer rules. You can use custom rules to identify the use of custom libraries or other components that might not be covered by the provided standard migration rules. For instructions on how to write custom rules, see the Rule Development Guide.
[ "when(condition) message(message) tag(tags)" ]
https://docs.redhat.com/en/documentation/migration_toolkit_for_runtimes/1.2/html/introduction_to_the_migration_toolkit_for_runtimes/what-is-the-toolkit_getting-started-guide
Chapter 4. InstallPlan [operators.coreos.com/v1alpha1]
Chapter 4. InstallPlan [operators.coreos.com/v1alpha1] Description InstallPlan defines the installation of a set of operators. Type object Required metadata spec 4.1. Specification Property Type Description apiVersion string APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources kind string Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds metadata ObjectMeta Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata spec object InstallPlanSpec defines a set of Application resources to be installed status object InstallPlanStatus represents the information about the status of steps required to complete installation. Status may trail the actual state of a system. 4.1.1. .spec Description InstallPlanSpec defines a set of Application resources to be installed Type object Required approval approved clusterServiceVersionNames Property Type Description approval string Approval is the user approval policy for an InstallPlan. It must be one of "Automatic" or "Manual". approved boolean clusterServiceVersionNames array (string) generation integer source string sourceNamespace string 4.1.2. .status Description InstallPlanStatus represents the information about the status of steps required to complete installation. Status may trail the actual state of a system. Type object Required catalogSources phase Property Type Description attenuatedServiceAccountRef object AttenuatedServiceAccountRef references the service account that is used to do scoped operator install. bundleLookups array BundleLookups is the set of in-progress requests to pull and unpackage bundle content to the cluster. bundleLookups[] object BundleLookup is a request to pull and unpackage the content of a bundle to the cluster. catalogSources array (string) conditions array conditions[] object InstallPlanCondition represents the overall status of the execution of an InstallPlan. message string Message is a human-readable message containing detailed information that may be important to understanding why the plan has its current status. phase string InstallPlanPhase is the current status of a InstallPlan as a whole. plan array plan[] object Step represents the status of an individual step in an InstallPlan. startTime string StartTime is the time when the controller began applying the resources listed in the plan to the cluster. 4.1.3. .status.attenuatedServiceAccountRef Description AttenuatedServiceAccountRef references the service account that is used to do scoped operator install. Type object Property Type Description apiVersion string API version of the referent. fieldPath string If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future. kind string Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds name string Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names namespace string Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ resourceVersion string Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency uid string UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids 4.1.4. .status.bundleLookups Description BundleLookups is the set of in-progress requests to pull and unpackage bundle content to the cluster. Type array 4.1.5. .status.bundleLookups[] Description BundleLookup is a request to pull and unpackage the content of a bundle to the cluster. Type object Required catalogSourceRef identifier path replaces Property Type Description catalogSourceRef object CatalogSourceRef is a reference to the CatalogSource the bundle path was resolved from. conditions array Conditions represents the overall state of a BundleLookup. conditions[] object identifier string Identifier is the catalog-unique name of the operator (the name of the CSV for bundles that contain CSVs) path string Path refers to the location of a bundle to pull. It's typically an image reference. properties string The effective properties of the unpacked bundle. replaces string Replaces is the name of the bundle to replace with the one found at Path. 4.1.6. .status.bundleLookups[].catalogSourceRef Description CatalogSourceRef is a reference to the CatalogSource the bundle path was resolved from. Type object Property Type Description apiVersion string API version of the referent. fieldPath string If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future. kind string Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds name string Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names namespace string Namespace of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ resourceVersion string Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency uid string UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids 4.1.7. .status.bundleLookups[].conditions Description Conditions represents the overall state of a BundleLookup. Type array 4.1.8. .status.bundleLookups[].conditions[] Description Type object Required status type Property Type Description lastTransitionTime string Last time the condition transitioned from one status to another. lastUpdateTime string Last time the condition was probed. message string A human readable message indicating details about the transition. reason string The reason for the condition's last transition. status string Status of the condition, one of True, False, Unknown. type string Type of condition. 4.1.9. .status.conditions Description Type array 4.1.10. .status.conditions[] Description InstallPlanCondition represents the overall status of the execution of an InstallPlan. Type object Property Type Description lastTransitionTime string lastUpdateTime string message string reason string ConditionReason is a camelcased reason for the state transition. status string type string InstallPlanConditionType describes the state of an InstallPlan at a certain point as a whole. 4.1.11. .status.plan Description Type array 4.1.12. .status.plan[] Description Step represents the status of an individual step in an InstallPlan. Type object Required resolving resource status Property Type Description optional boolean resolving string resource object StepResource represents the status of a resource to be tracked by an InstallPlan. status string StepStatus is the current status of a particular resource an in InstallPlan 4.1.13. .status.plan[].resource Description StepResource represents the status of a resource to be tracked by an InstallPlan. Type object Required group kind name sourceName sourceNamespace version Property Type Description group string kind string manifest string name string sourceName string sourceNamespace string version string 4.2. API endpoints The following API endpoints are available: /apis/operators.coreos.com/v1alpha1/installplans GET : list objects of kind InstallPlan /apis/operators.coreos.com/v1alpha1/namespaces/{namespace}/installplans DELETE : delete collection of InstallPlan GET : list objects of kind InstallPlan POST : create an InstallPlan /apis/operators.coreos.com/v1alpha1/namespaces/{namespace}/installplans/{name} DELETE : delete an InstallPlan GET : read the specified InstallPlan PATCH : partially update the specified InstallPlan PUT : replace the specified InstallPlan /apis/operators.coreos.com/v1alpha1/namespaces/{namespace}/installplans/{name}/status GET : read status of the specified InstallPlan PATCH : partially update status of the specified InstallPlan PUT : replace status of the specified InstallPlan 4.2.1. /apis/operators.coreos.com/v1alpha1/installplans HTTP method GET Description list objects of kind InstallPlan Table 4.1. HTTP responses HTTP code Reponse body 200 - OK InstallPlanList schema 401 - Unauthorized Empty 4.2.2. /apis/operators.coreos.com/v1alpha1/namespaces/{namespace}/installplans HTTP method DELETE Description delete collection of InstallPlan Table 4.2. 
HTTP responses HTTP code Reponse body 200 - OK Status schema 401 - Unauthorized Empty HTTP method GET Description list objects of kind InstallPlan Table 4.3. HTTP responses HTTP code Reponse body 200 - OK InstallPlanList schema 401 - Unauthorized Empty HTTP method POST Description create an InstallPlan Table 4.4. Query parameters Parameter Type Description dryRun string When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed fieldValidation string fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. Table 4.5. Body parameters Parameter Type Description body InstallPlan schema Table 4.6. HTTP responses HTTP code Reponse body 200 - OK InstallPlan schema 201 - Created InstallPlan schema 202 - Accepted InstallPlan schema 401 - Unauthorized Empty 4.2.3. /apis/operators.coreos.com/v1alpha1/namespaces/{namespace}/installplans/{name} Table 4.7. Global path parameters Parameter Type Description name string name of the InstallPlan HTTP method DELETE Description delete an InstallPlan Table 4.8. Query parameters Parameter Type Description dryRun string When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed Table 4.9. HTTP responses HTTP code Reponse body 200 - OK Status schema 202 - Accepted Status schema 401 - Unauthorized Empty HTTP method GET Description read the specified InstallPlan Table 4.10. HTTP responses HTTP code Reponse body 200 - OK InstallPlan schema 401 - Unauthorized Empty HTTP method PATCH Description partially update the specified InstallPlan Table 4.11. Query parameters Parameter Type Description dryRun string When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed fieldValidation string fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. 
- Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. Table 4.12. HTTP responses HTTP code Reponse body 200 - OK InstallPlan schema 401 - Unauthorized Empty HTTP method PUT Description replace the specified InstallPlan Table 4.13. Query parameters Parameter Type Description dryRun string When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed fieldValidation string fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. Table 4.14. Body parameters Parameter Type Description body InstallPlan schema Table 4.15. HTTP responses HTTP code Reponse body 200 - OK InstallPlan schema 201 - Created InstallPlan schema 401 - Unauthorized Empty 4.2.4. /apis/operators.coreos.com/v1alpha1/namespaces/{namespace}/installplans/{name}/status Table 4.16. Global path parameters Parameter Type Description name string name of the InstallPlan HTTP method GET Description read status of the specified InstallPlan Table 4.17. HTTP responses HTTP code Reponse body 200 - OK InstallPlan schema 401 - Unauthorized Empty HTTP method PATCH Description partially update status of the specified InstallPlan Table 4.18. Query parameters Parameter Type Description dryRun string When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed fieldValidation string fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. 
- Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. Table 4.19. HTTP responses HTTP code Reponse body 200 - OK InstallPlan schema 401 - Unauthorized Empty HTTP method PUT Description replace status of the specified InstallPlan Table 4.20. Query parameters Parameter Type Description dryRun string When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed fieldValidation string fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. Table 4.21. Body parameters Parameter Type Description body InstallPlan schema Table 4.22. HTTP responses HTTP code Reponse body 200 - OK InstallPlan schema 201 - Created InstallPlan schema 401 - Unauthorized Empty
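In practice, the spec.approved field is the one a cluster administrator flips when spec.approval is set to Manual. A minimal sketch of doing so from the CLI, using a hypothetical install plan named install-abcde in the operators namespace (substitute your own install plan name and namespace):
$ oc get installplan -n operators
$ oc patch installplan install-abcde -n operators --type merge --patch '{"spec":{"approved":true}}'
The PATCH is served by the /apis/operators.coreos.com/v1alpha1/namespaces/{namespace}/installplans/{name} endpoint described above.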
https://docs.redhat.com/en/documentation/openshift_container_platform/4.16/html/operatorhub_apis/installplan-operators-coreos-com-v1alpha1
Providing feedback on Red Hat documentation
Providing feedback on Red Hat documentation We appreciate and prioritize your feedback regarding our documentation. Provide as much detail as possible, so that your request can be quickly addressed. Prerequisites You are logged in to the Red Hat Customer Portal. Procedure To provide feedback, perform the following steps: Click the following link: Create Issue . Describe the issue or enhancement in the Summary text box. Provide details about the issue or requested enhancement in the Description text box. Type your name in the Reporter text box. Click the Create button. This action creates a documentation ticket and routes it to the appropriate documentation team. Thank you for taking the time to provide feedback.
https://docs.redhat.com/en/documentation/cost_management_service/1-latest/html/limiting_access_to_cost_management_resources/proc-providing-feedback-on-redhat-documentation
Chapter 2. Power management auditing and analysis
Chapter 2. Power management auditing and analysis 2.1. Audit and analysis overview The detailed manual audit, analysis, and tuning of a single system is usually the exception because the time and cost spent to do so typically outweighs the benefits gained from these last pieces of system tuning. However, performing these tasks once for a large number of nearly identical systems where you can reuse the same settings for all systems can be very useful. For example, consider the deployment of thousands of desktop systems, or an HPC cluster where the machines are nearly identical. Another reason to do auditing and analysis is to provide a basis for comparison against which you can identify regressions or changes in system behavior in the future. The results of this analysis can be very helpful in cases where hardware, BIOS, or software updates happen regularly and you want to avoid any surprises with regard to power consumption. Generally, a thorough audit and analysis gives you a much better idea of what is really happening on a particular system. Auditing and analyzing a system with regard to power consumption is relatively hard, even with the most modern systems available. Most systems do not provide the necessary means to measure power use via software. Exceptions exist, though: the ILO management console of Hewlett Packard server systems has a power management module that you can access through the web. IBM provides a similar solution in their BladeCenter power management module. On some Dell systems, the IT Assistant offers power monitoring capabilities as well. Other vendors are likely to offer similar capabilities for their server platforms, but as can be seen, there is no single solution available that is supported by all vendors. If your system has no built-in mechanism to measure power consumption, a few other choices exist. You could install a special power supply for your system that offers power consumption information through USB. The Gigabyte Odin GT 550 W PC power supply is one such example. As a last resort, some external watt meters like the Watts up? PRO have a USB connector. Direct measurement of power consumption is often only necessary to maximize savings as far as possible. Fortunately, other means are available to measure whether changes are in effect and how the system is behaving. This chapter describes the necessary tools.
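For example, even without any measurement hardware, software tools can show where power is going: PowerTOP, run as root, presents an interactive view of wakeups and power-relevant tunables, which is often enough to verify that a tuning change had the intended effect:
# powertop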
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/power_management_guide/Audit_and_Analysis
E.3.5. /proc/fs
E.3.5. /proc/fs This directory shows which file systems are exported. If running an NFS server, typing cat /proc/fs/nfsd/exports displays the file systems being shared and the permissions granted for those file systems. For more on file system sharing with NFS, see the Network File System (NFS) chapter of the Storage Administration Guide .
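The exact output depends on what the server exports; the following is purely an illustrative sketch with placeholder paths, clients, and options, showing the general shape of the file on a host exporting one directory:
$ cat /proc/fs/nfsd/exports
# Version 1.1
# Path Client(Flags) # IPs
/export/share  *.example.com(rw,root_squash,sync,wdelay)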
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/deployment_guide/s2-proc-dir-fs
4.5. Security
4.5. Security TPM TPM (Trusted Platform Module) hardware can create, store and use RSA keys securely (without ever being exposed in memory), verify a platform's software state using cryptographic hashes and more. The trousers and tpm-tools packages are considered a Technology Preview. Packages: trousers-0.3.4-4 , tpm-tools-1.3.4-2
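As a quick functional check of the Technology Preview packages, the tpm-tools package ships small command-line utilities such as tpm_version, which report information from the TPM through the tcsd daemon provided by trousers. A minimal sketch, assuming a TPM is present and enabled in the firmware:
# service tcsd start
# tpm_version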
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/6.6_technical_notes/security_tp
Chapter 5. RHEL 8.2.0 release
Chapter 5. RHEL 8.2.0 release 5.1. New features This part describes new features and major enhancements introduced in Red Hat Enterprise Linux 8.2. 5.1.1. Installer and image creation Ability to register your system, attach RHEL subscriptions, and install from the Red Hat CDN In RHEL 8.2, you can register your system, attach RHEL subscriptions, and install from the Red Hat Content Delivery Network (CDN) before package installation. Interactive GUI installations, as well as automated Kickstart installations, support this feature. Benefits include: The use of the smaller Boot ISO image file removes the need to download the larger Binary DVD ISO image file. The CDN uses the latest packages that result in a fully subscribed and up-to-date system immediately after installation. There is no requirement to install package updates after installation. Registration is performed before package installation, resulting in a shorter and more streamlined installation process. Integrated support for Red Hat Insights is available. (BZ#1748281) Ability to register your system to Red Hat Insights during installation In RHEL 8.2, you can register your system to Red Hat Insights during installation. Interactive GUI installations, as well as automated Kickstart installations, support this feature. Benefits include: Easier to identify, prioritize, and resolve issues before business operations are affected. Proactively identify and remediate threats to security, performance, availability, and stability with predictive analytics. Avoid problems and unplanned downtime in your environment. ( BZ#1746391 ) Image Builder now offers cloud-init support for creating Azure images With this enhancement, cloud-init support is available for Azure images created by Image Builder. As a result, the creation of on-premise images with fast-provisioning and the ability to add custom data is available to customers. ( BZ#1754711 ) Added new kickstart commands: rhsm and zipl With this release, the following kickstart commands are added: rhsm : Use the rhsm command to register system with Red Hat during installation. zipl : Use the zipl command to specify zipl configuration on IBM Z systems. (BZ#1972214) 5.1.2. Software management User-Agent header string now includes information read from the /etc/os-release file With this enhancement, the User-Agent header string, which is normally included with the HTTP requests made by DNF, has been extended with information read from the /etc/os-release file. To obtain more information, see user_agent in the dnf.conf(5) man page. ( BZ#1676891 ) All dnf-automatic.timer timer units now use the real-time clock by default Previously, the dnf-automatic.timer timer units used the monotonic clock, which resulted in unpredictable activation time after the system boot. With this update, the timer units run between 6 a.m. and 7 a.m. If the system is off during that time, the timer units are activated within one hour after the system boot. ( BZ#1754609 ) The createrepo_c utility now skips packages whose metadata contains the disallowed control characters To ensure a valid XML, the package metadata must not contain any control characters, with the exception of: the horizontal tab the newline character the carriage return character With this update, the createrepo_c utility does not include packages with metadata containing disallowed control characters in a newly created repository, and returns the following error message: ( BZ#1743186 ) 5.1.3. 
Shells and command-line tools opencv rebased to version 3.4.6 The opencv packages have been upgraded to upstream version 3.4.6. Notable changes include: Support for new OpenCL parameters, such as OPENCV_OPENCL_BUILD_EXTRA_OPTIONS and OPENCV_OPENCL_DEVICE_MAX_WORK_GROUP_SIZE . The objdetect module now supports a QR code detection algorithm. Multiple new methods, such as MatSize::dims or VideoCapture::getBackendName . Multiple new functions, such as drawFrameAxes or getVersionMajor . Various performance improvements, including improvements of the GaussianBlur function, v_load_deinterleave and v_store_interleave intrinsics when using SSSE3 instructions. ( BZ#1694647 ) 5.1.4. Infrastructure services graphviz-python3 is now distributed in the CRB repository This update adds the graphviz-python3 package to RHEL 8. The package provides bindings required for usage of the Graphviz graph visualization software from Python. Note that the graphviz-python3 package is distributed in the unsupported CodeReady Linux Builder repository (CRB). ( BZ#1704875 ) tuned rebased to version 2.13.0 The tuned packages have been upgraded to upstream version 2.13.0. Notable enhancements include: Architecture-dependent tuning framework has been added. Support for multiple include directives has been added. Tuning in the sap-hana , latency-performance , and realtime profiles has been updated. ( BZ#1738250 ) powertop rebased to version 2.11 The powertop package has been upgraded to version 2.11, which provides the following notable change: Support for the EHL, TGL, ICL/ICX platforms (BZ#1716721) BIND now supports GeoIP2 instead of GeoLite Legacy GeoIP The GeoLite Legacy GeoIP library is no longer supported in BIND. With this update, GeoLite Legacy GeoIP has been replaced with GeoIP2, which is provided in the libmaxminddb data format. Note that the new format may require some configuration changes, and the format also does not support the following legacy GeoIP access control list (ACL) settings: geoip netspeed geoip org ISO 3166 Alpha-3 country codes (BZ#1564443) stale-answer now provides old cached records in case of DDoS attack Previously, a Distributed Denial of Service (DDoS) attack caused the authoritative servers to fail with the SERVFAIL error. With this update, the stale-answer functionality provides the expired records until a fresh response is obtained. To enable or disable the serve-stale feature, use either of these: Configuration file Remote control channel (rndc) ( BZ#1664863 ) BIND rebased to version 9.11.13 The bind packages have been upgraded to version 9.11.13. Notable changes include: The tcp-highwater statistics variable has been added. This variable shows the maximum number of concurrent TCP clients recorded during a run. The SipHash-2-4 -based DNS Cookies (RFC 7873) algorithm has been added. Glue addresses for root priming queries are returned regardless of how the minimal-responses configuration option is set. The named-checkconf command now ensures the validity of the DNS64 network prefixes. Automatic rollover per RFC 5011 no longer fails when the trusted-keys and managed-keys statements are both configured for the same name. Instead, a warning message is logged. Internationalized Domain Name (IDN) processing in the dig and nslookup utilities is now disabled by default when they are not run on a terminal (for example, in a script). IDN processing in dig can be switched on by using the +idnin and +idnout options. ( BZ#1704328 ) 5.1.5.
Security RHEL 8 now contains the DISA STIG profile Security Technical Implementation Guides (STIG) are a set of baseline recommendations published by the Defense Information Systems Agency (DISA) to harden the security of information systems and software that might otherwise be vulnerable. This release includes the profile and Kickstart file for this security policy. With this enhancement, users can check systems for compliance, remediate systems to be compliant, and install systems compliant with DISA STIG for Red Hat Enterprise Linux 8. ( BZ#1755447 ) crypto-policies can now be customized With this update, you can adjust certain algorithms or protocols of any policy level or set a new complete policy file as the current system-wide cryptographic policy. This enables administrators to customize the system-wide cryptographic policy as required by different scenarios. RPM packages should store policies provided by them in the /usr/share/crypto-policies/policies directory. The /etc/crypto-policies/policies directory contains local custom policies. For more information, see the Custom Policies section in the update-crypto-policies(8) man page and the Crypto Policy Definition Format section in the update-crypto-policies(8) man page. (BZ#1690565) SCAP Security Guide now supports ACSC Essential Eight The scap-security-guide packages now provide the Australian Cyber Security Centre (ACSC) Essential Eight compliance profile and a corresponding Kickstart file. With this enhancement, users can install a system that conforms with this security baseline. Furthermore, you can use the OpenSCAP suite for checking security compliance and remediation using this specification of minimum security controls defined by ACSC. ( BZ#1755194 ) oscap-podman for security and compliance scanning of containers is now available This update of the openscap packages introduces a new utility for security and compliance scanning of containers. The oscap-podman tool provides an equivalent of the oscap-docker utility that serves for scanning container and container images in RHEL 7. (BZ#1642373) setroubleshoot can now analyze and react to execmem access denials This update introduces a new setroubleshoot plugin. The plugin can analyze execmem access denials (AVCs) and provide relevant advice. As a result, setroubleshoot can now suggest a possibility to switch a boolean if it allows access, or report the issue when no boolean can allow access. (BZ#1649842) New packages: setools-gui and setools-console-analyses The setools-gui package, which has been part of RHEL 7, is now being introduced to RHEL 8. Graphical tools help inspect relations and data flows especially in multi-level systems with highly specialized SELinux policies. With the apol graphical tool from the setools-gui package, you can inspect and analyze aspects of an SELinux policy. Tools from the setools-console-analyses package enable you to analyze domain transitions and SELinux policy information flows. ( BZ#1731519 ) Confined users in SELinux can now manage user session services Previously, confined users were not able to manage user session services. As a result, they could not execute systemctl --user or busctl --user commands or work in the RHEL web console. With this update, confined users can manage user sessions. ( BZ#1727887 ) The lvmdbusd service is now confined by SELinux The lvmdbusd service provides a D-Bus API to the logical volume manager (LVM). 
Previously, the lvmdbusd daemon could not transition to the lvm_t context even though the SELinux policy for lvm_t was defined. As a consequence, the lvmdbusd daemon was executed in the unconfined_service_t domain and SELinux labeled lvmdbusd as unconfined. With this update, the lvmdbusd executable file has the lvm_exec_t context defined and lvmdbusd can now be used correctly with SELinux in enforcing mode. ( BZ#1726166 ) semanage now supports listing and modifying SCTP and DCCP ports. Previously, semanage port allowed listing and modifying of only TCP and UDP ports. This update adds SCTP and DCCP protocol support to semanage port . As a result, administrators can now check if two machines can communicate via SCTP and fully enable SCTP features to successfully deploy SCTP-based applications. (BZ#1563742) semanage export now shows customizations related to permissive domains With this update, the semanage utility, which is part of the policycoreutils package for SELinux, is able to display customizations related to permissive domains. System administrators can now transfer permissive local modifications between machines using the semanage export command. (BZ#1417455) udica can add new allow rules generated from SELinux denials to existing container policy When a container that is running under a policy generated by the udica utility triggers an SELinux denial, udica is now able to update the policy. The new parameter -a or --append-rules can be used to append rules from an AVC file. ( BZ#1732704 ) New SELinux types enable services to run confined This update introduces new SELinux types that enable the following services to run as confined services in SELinux enforcing mode instead of running in the unconfined_service_t domain: lldpd now runs as lldpad_t rrdcached now runs as rrdcached_t stratisd now runs as stratisd_t timedatex now runs as timedatex_t ( BZ#1726246 , BZ#1726255 , BZ#1726259 , BZ#1730204 ) Clevis is able to list policies in place for a given LUKS device With this update, the clevis luks list command lists PBD policies in place for a given LUKS device. This makes it easier to find information on Clevis pins in use and pin configuration, for example, Tang server addresses, details on tpm2 policies, and SSS thresholds. ( BZ#1766526 ) Clevis provides new commands for reporting key status and rebinding expired keys The clevis luks report command now provides a simple way to report whether keys for a particular binding require rotation. Regular key rotations in a Tang server improve the security of Network-Bound Disk Encryption (NBDE) deployments, and therefore the client should provide detection of expired keys. If the key is expired, Clevis suggests using the clevis luks regen command which rebinds the expired key slot with a current key. This significantly simplifies the process of key rotation. (BZ#1564559, BZ#1564566) Clevis can now extract the passphrase used for binding a particular slot in a LUKS device With this update to the Clevis policy-based decryption framework, you can now extract the passphrase used for binding a particular slot in a LUKS device. Previously, if the LUKS installation passphrase was erased, Clevis could not perform LUKS administrative tasks, such as re-encryption, enabling a new key slot with a user passphrase, and re-binding Clevis when the administrator needs to change the sss threshold. This update introduces the clevis luks pass command that shows the passphrase used for binding a particular slot. 
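To make the Clevis enhancements above easier to picture, the following sketch strings the new subcommands together. The device path and slot number are placeholders, and the exact option spelling should be checked against the corresponding clevis luks man pages on an RHEL 8.2 system.

# Placeholder device and slot; adjust to your LUKS volume.
DEV=/dev/sdb2
# List the PBD policies bound to the device.
clevis luks list -d "$DEV"
# Check whether the binding in slot 1 uses keys that require rotation.
clevis luks report -d "$DEV" -s 1
# If rotation is needed, rebind the slot against the current Tang keys.
clevis luks regen -d "$DEV" -s 1
# Show the passphrase that Clevis stored for that binding.
clevis luks pass -d "$DEV" -s 1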
(BZ#1436780) Clevis now provides improved support for decrypting multiple LUKS devices on boot The clevis packages have been updated to provide better support for decrypting multiple LUKS-encrypted devices on boot. Prior to this improvement, the administrator had to perform complicated changes to the system configuration to enable the proper decryption of multiple devices by Clevis on boot. With this release, you can set up the decryption by using the clevis luks bind command and updating the initramfs through the dracut -fv --regenerate-all command. For more details, see the Configuring automated unlocking of encrypted volumes using policy-based decryption section. ( BZ#1784524 ) openssl-pkcs11 rebased to 0.4.10 The openssl-pkcs11 package has been upgraded to upstream version 0.4.10, which provides many bug fixes and enhancements over the version. The openssl-pkcs11 package provides access to PKCS #11 modules through the engine interface. The major changes introduced by the new version are: If a public key object corresponding to the private key is not available when loading an ECDSA private key, the engine loads the public key from a matching certificate, if present. You can use generic PKCS #11 URI (for example pkcs11:type=public ) because the openssl-pkcs11 engine searches all tokens that match a given PKCS #11 URI. The system attempts to log in with a PIN only if a single device matches the URI search. This prevents authentication failures due to providing the PIN to all matching tokens. When accessing a device, the openssl-pkcs11 engine now marks the RSA methods structure with the RSA_FLAG_FIPS_METHOD flag. In FIPS mode, OpenSSL requires the flag to be set in the RSA methods structure. Note that the engine cannot detect whether a device is FIPS-certified. (BZ#1745082) rsyslog rebased to 8.1911.0 The rsyslog utility has been upgraded to upstream version 8.1911.0, which provides a number of bug fixes and enhancements over the version. The following list includes notable enhancements: New omhttp module allows you to send messages over the HTTP REST interface. The file input module is enhanced to improve stability, error reporting, and truncation detection. New action.resumeIntervalMax parameter that can be used with any action allows capping retry interval growth at a specified value. New StreamDriver.PermitExpiredCerts option for TLS permits connections even if a certificate has expired. You can now suspend and resume output based on configured external file content. This is useful in cases where the other end always accepts messages and silently drops them when it is not able to process them all. Error reporting for the file output module is improved and now contains real file names and more information on causes of errors. Disk queues now run multi-threaded, which improves performance. You can set stricter TLS operation modes: checking of the extendedKeyUsage certificate field and stricter checking of the CN/SAN certificate fields. (BZ#1740683) rsyslog now provides the omhttp plugin for communication through an HTTP REST interface With this update of the rsyslog packages, you can use the new omhttp plugin for producing an output compatible with services using a Representational State Transfer (REST) API, such as the Ceph storage platform, Amazon Simple Storage Service (Amazon S3), and Grafana Loki. This new HTTP output module provides a configurable REST path and message format, support for several batching formats, compression, and TLS encryption. 
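As a rough illustration of the omhttp output module described above, the following sketch drops a minimal action into /etc/rsyslog.d/ and restarts rsyslog. The endpoint is made up, and the server, serverport, restpath, and usehttps parameter names are assumptions to verify against the omhttp documentation shipped in the rsyslog-doc package.

# Hypothetical REST endpoint; parameter names are assumptions to verify.
cat > /etc/rsyslog.d/90-omhttp.conf <<'EOF'
module(load="omhttp")
action(type="omhttp"
       server="logs.example.com"
       serverport="8443"
       restpath="rest/v1/records"
       usehttps="on")
EOF
systemctl restart rsyslog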
For more details, see the /usr/share/doc/rsyslog/html/configuration/modules/omhttp.html file installed on your system with the rsyslog-doc package. ( BZ#1676559 ) omelasticsearch in rsyslog now supports rebindinterval This update of the rsyslog packages introduces support for setting the time of periodical reconnection in the omelasticsearch module. You can improve performance when sending records to a cluster of Elasticsearch nodes by setting this parameter according to your scenario. The value of the rebindinterval parameter indicates the number of operations submitted to a node after which rsyslog closes the connection and establishes a new one. The default value -1 means that rsyslog does not re-establish the connection. ( BZ#1692073 ) rsyslog mmkubernetes now provides metadata cache expiration With this update of the rsyslog packages, you can use two new parameters for the mmkubernetes module for setting metadata cache expiration. This ensures that deleted Kubernetes objects are removed from the mmkubernetes static cache. The value of the cacheentryttl parameter indicates the maximum age of cache entries in seconds. The cacheexpireinterval parameter has the following values: -1 for disabling cache-expiration checks 0 for enabling cache-expiration checks greater than 0 for regular cache-expiration checks in seconds ( BZ#1692072 ) audit rebased to version 3.0-0.14 The audit packages have been upgraded to upstream version 3.0-0.14, which provides many bug fixes and enhancements over the previous version, most notably: Added an option to interpret fields in the syslog plugin Divided the 30-ospp-v42.rules file into more granular files Moved example rules to the /usr/share/audit/sample-rules/ directory Fixed Audit KRB5 transport mode for remote logging ( BZ#1757986 ) Audit now contains many improvements from the kernel v5.5-rc1 This addition to the Linux kernel contains the majority of enhancements, bug fixes, and cleanups related to the Audit subsystem and introduced between versions 4.18 and 5.5-rc1. The following list highlights important changes: Wider use of the exe field for filtering Support for v3 namespaced capabilities Improvements for filtering on remote file systems Fix of the gid filter rule Fixes of a use-after-free memory corruption and memory leaks Improvements of event-record association Cleanups of the fanotify interface, Audit configuration options, and the syscall interface Fix of the Extended Verification Module (EVM) return value Fixes and cleanups of several record formats Simplifications and fixes of Virtual File System (VFS) auditing (BZ#1716002) fapolicyd rebased to 0.9.1-2 The fapolicyd packages that provide RHEL application whitelisting have been upgraded to upstream version 0.9.1-2. Notable bug fixes and enhancements include: Process identification is fixed. The subject part and the object part are now positioned strictly in the rule. Both parts are separated by a colon, and they contain the required permission (execute, open, any). The subject and object attributes are consolidated. The new rule format is the following: For example: ( BZ#1759895 ) sudo rebased to 1.8.29-3.el8 The sudo packages have been upgraded to upstream version 1.8.29-3, which provides a number of bug fixes and enhancements over the previous version. The major changes introduced by the new version are: sudo now writes Pluggable Authentication Module (PAM) messages to the user's terminal, if available, instead of the standard output or standard error output.
This prevents possible confusion of PAM output and command output sent to files and pipes. The notBefore and notAfter options from LDAP and SSSD now work and display correctly with the sudo -l command. The cvtsudoers command now rejects non-LDAP Data Interchange Format (LDIF) input when converting from LDIF to sudoers and JSON formats. With the new log_allowed and log_denied settings for sudoers , you can disable logging and auditing of allowed and denied commands. You can now use sudo with the -g option to specify a group that matches any of the target user's groups even if no groups are present in the runas_spec specification. Previously, you could only do so if the group matched the target user's primary group. Fixed a bug that prevented sudo from matching the host name to the value of ipa_hostname from sssd.conf , if specified. A vulnerability that allowed a sudo user to run a command as root when the Runas specification disallowed root access with the ALL keyword is now fixed (CVE-2019-14287). The use of unknown user and group IDs for permissive sudoers entries, for example using the ALL keyword, is now disabled. You can enable it with the runas_allow_unknown_id setting (CVE-2019-19232). ( BZ#1733961 ) The pam_namespace module now allows specifying additional mount options for tmpfs The nosuid , noexec , and nodev mount options can now be used in the /etc/security/namespace.conf configuration file to respectively disable setuid bit effect, disable running executables, and to prevent files from being interpreted as character or block devices on the mounted tmpfs filesystem. Additional mount options are specified in the tmpfs(5) man page. (BZ#1252859) pam_faillock can now read settings from faillock.conf configuration file The pam_faillock module, a part of pluggable authentication modules (PAM), can now read settings from the configuration file located at /etc/security/faillock.conf . This makes it easier to set up an account lockout on authentication failures, provide user profiles for this functionality, and handle different PAM configurations by simply editing the faillock.conf file. ( BZ#1537242 ) 5.1.6. Networking User-space applications can now retrieve the netns id selected by the kernel User-space applications can request the kernel to select a new netns ID and assign it to a network name space. With this enhancement, users can specify the NLM_F_ECHO flag when sending an RTM_NETNSID netlink message to the kernel. The kernel then sends the netlink message back to the user. This message includes the netns ID set to the value the kernel selected. As a result, user-space applications now have a reliable option to identify the netlink ID the kernel selected. (BZ#1763661) firewalld rebased to version 0.8 The firewalld packages have been updated to version 0.8. Notable changes include: This version of firewalld includes all bug fixes since version 0.7.0. firewalld now uses the libnftables JSON interface to the nftables subsystem. This improves performance and reliability of rule application. In service definitions, the new helper element replaces module . This version allows custom helpers to use standard helper modules. ( BZ#1740670 ) ndptool can now specify a destination address in IPv6 header With this update, the ndptool utility can send a Neighbor Solicitation (NS) or a Neighbor Advertisement (NA) message to a specific destination by specifying the address in the IPv6 header. As a result, a message can be sent to addresses other than just the link-local address. 
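A possible invocation of the ndptool enhancement above is sketched below. The -i and -t options are long-standing; the -D (destination in the IPv6 header) and -T (target in the ICMPv6 header) option names are assumptions that should be confirmed with ndptool --help, and the addresses are documentation placeholders.

# Send a Neighbor Solicitation whose IPv6 header is addressed to a specific
# destination instead of the link-local all-nodes address (option names and
# addresses are assumptions/placeholders).
ndptool -i eth0 -t ns -D 2001:db8::10 -T 2001:db8::10 send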
( BZ#1697595 ) nftables now supports multi-dimensional IP set types With this enhancement, the nftables packet-filtering framework supports set types with concatenations and intervals. As a result, administrators no longer require workarounds to create multi-dimensional IP set types. (BZ#1593711) nftables rebased to version 0.9.3 The nftables packages have been upgraded to upstream version 0.9.3, which provides a number of bug fixes and enhancements over the version: A JSON API has been added to the libnftables library. This library provides a high-level interface to manage nftables rule sets from third-party applications. To use the new API in Python, install the python3-nftables package. Statements support IP prefixes and ranges, such as 192.0.2.0/24 and 192.0.2.0-192.0.2.30 . Support for operating system fingerprints has been added to mark packets based on the guessed operating system. For further details, see the osf expression section in the nft(8) man page. Transparent proxy support has been added to redirect packets to a local socket without changing the packet header in any way. For details, see the tproxy statement section in the nft(8) man page. By default, nft displays textual names of the priority set while creating the nft chains. To view standard priority numerical values, use the -y option. The security mark support has been added. The support for dynamic sets updates has been improved to set updates from the packet path. The support for transport header port matching has been added. For further information about notable changes, read the upstream release notes before updating: https://lore.kernel.org/netfilter-devel/20190624164910.defehs5giqziqnir@salvia/ https://lore.kernel.org/netfilter-devel/20190819115807.myv6owxzblj2bthd@salvia/ https://lore.kernel.org/netfilter-devel/20191202211737.xvmd6e6xxj4xvvjl@salvia/ (BZ#1643192) Rules for the firewalld service can now use connection tracking helpers for services running on a non-standard port User-defined helpers in the firewalld service can now use standard kernel helper modules. This enables administrators to create firewalld rules to use connection tracking helpers for services running on a non-standard port. ( BZ#1733066 ) The whois package is now available With this enhancement, the whois package is now available in RHEL 8.2.0. As a result, retrieving information about a specific domain name or IP address is now possible. (BZ#1734183) eBPF for tc is now fully supported The Traffic Control (tc) kernel subsystem and the tc tool can attach extended Berkeley Packet Filtering (eBPF) programs as packet classifiers and actions for both ingress and egress queueing disciplines. This enables programmable packet processing inside the kernel network data path. eBPF for tc , previously available as a technology preview, is now fully supported in RHEL 8.2. ( BZ#1755347 ) 5.1.7. Kernel Kernel version in RHEL 8.2 Red Hat Enterprise Linux 8.2 is distributed with the kernel version 4.18.0-193. See also Important Changes to External Kernel Parameters and Device Drivers . ( BZ#1797671 ) Extended Berkeley Packet Filter for RHEL 8.2 The Extended Berkeley Packet Filter (eBPF) is an in-kernel virtual machine that allows code execution in the kernel space, in the restricted sandbox environment with access to a limited set of functions. The virtual machine executes a special assembly-like code. 
The eBPF bytecode first loads to the kernel, followed by its verification, code translation to the native machine code with just-in-time compilation, and then the virtual machine executes the code. Red Hat ships numerous components that utilize the eBPF virtual machine. Each component is in a different development phase, and thus not all components are currently fully supported. In RHEL 8.2, the following eBPF components are supported: The BPF Compiler Collection (BCC) tools package, which is a userspace collection of dynamic kernel tracing utilities that use the eBPF virtual machine for creating efficient kernel tracing and manipulation programs. The BCC provides tools for I/O analysis, networking, and monitoring of Linux operating systems using eBPF . The BCC library which allows the development of tools similar to those provided in the BCC tools package. The eBPF for Traffic Control (tc) feature, which enables programmable packet processing inside the kernel network data path. All other eBPF components are available as Technology Preview, unless a specific component is indicated as supported. The following notable eBPF components are currently available as Technology Preview: The bpftrace tracing language The eXpress Data Path (XDP) feature For more information regarding the Technology Preview components, see Technology Previews . ( BZ#1780124 ) Intel (R) Omni-Path Architecture (OPA) Host Software Intel Omni-Path Architecture (OPA) host software is fully supported in Red Hat Enterprise Linux 8.2. Intel OPA provides Host Fabric Interface (HFI) hardware with initialization and setup for high performance data transfers (high bandwidth, high message rate, low latency) between compute and I/O nodes in a clustered environment. (BZ#1833541) Control Group v2 is now fully supported in RHEL 8 Control Group v2 mechanism is a unified hierarchy control group. Control Group v2 organizes processes hierarchically and distributes system resources along the hierarchy in a controlled and configurable manner. Unlike the version, Control Group v2 has only a single hierarchy. This single hierarchy enables the Linux kernel to: Categorize processes based on the role of their owner. Eliminate issues with conflicting policies of multiple hierarchies. Control Group v2 supports numerous controllers. Some of the examples are: CPU controller regulates the distribution of CPU cycles. This controller implements: Weight and absolute bandwidth limit models for normal scheduling policy. Absolute bandwidth allocation model for real-time scheduling policy. Cpuset controller confines processor and/or memory placement of processes to only those of the mentioned resources that are specified in the cpuset interface files. Memory controller regulates the memory distribution. Currently, the following types of memory usages are tracked: Userland memory - page cache and anonymous memory. Kernel data structures such as dentries and inodes. TCP socket buffers. I/O controller regulates the distribution of I/O resources. Writeback controller interacts with both Memory and I/O controllers and is Control Group v2 specific. The information above was based on Control Group v2 upstream documentation. You can refer to the same link to obtain more information about particular Control Group v2 controllers. Be warned that not all features mentioned in the upstream document are implemented yet in RHEL 8. 
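For a concrete feel of the unified hierarchy described above, the following sketch creates a child control group and caps its memory. It assumes the system was booted with systemd.unified_cgroup_hierarchy=1 so that /sys/fs/cgroup is the Control Group v2 mount point; the group name and limit are arbitrary.

# Make the cpu and memory controllers available to child groups of the root.
echo "+cpu +memory" > /sys/fs/cgroup/cgroup.subtree_control
# Create a child group and cap its memory at 512 MiB.
mkdir /sys/fs/cgroup/demo
echo $((512 * 1024 * 1024)) > /sys/fs/cgroup/demo/memory.max
# Move the current shell into the group; processes it starts inherit the limit.
echo $$ > /sys/fs/cgroup/demo/cgroup.procs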
(BZ#1401552) Randomizing free lists: Improved performance and utilization of direct-mapped memory-side-cache With this enhancement, you can enable page allocator to randomize free lists and improve the average utilization of a direct-mapped memory-side-cache. The kernel command-line option page_alloc.shuffle , enables the page allocator to randomize the free lists and sets the boolean flag to True . The sysfs file, which is located at /sys/module/page_alloc/parameters/shuffle reads the flag status, shuffles the free lists, such that the Dynamic Random Access Memory (DRAM) is cached, and the latency band between the DRAM and persistent memory is reduced. As a result, persistent memory with a higher capacity and lower bandwidth is available on general purpose server platforms. (BZ#1620349) The TPM userspace tool has been updated to the last version The tpm2-tools userspace tool has been updated to version 3.2.1. This update provides several bug fixes, in particular relating to Platform Configuration Register code and manual page clean ups. (BZ#1725714) The C620-series PCH chipset now supports the Intel Trace Hub feature This update adds hardware support for Intel Trace Hub (TH) in C620-series Platform Controller Hub (PCH), also known as Lewisburg PCH. Users with C620-series PCH can now use Intel TH. (BZ#1714486) The perf tool now supports per die events aggregation for CLX-AP and CPX processors With this update, the perf tool now provides support for per-die event counts aggregation for some Intel CPUs with multiple dies. To enable this mode, add the --per-die option in addition to the -a option for Xeon Cascade Lake-AP (CLX-AP) and Cooper Lake (CPX) system processors. As a result, this update detects any imbalance between the dies. The perf stat command captures the event counts and displays the output as: (BZ#1660368) The threshold of crashkernel=auto is decreased on IBM Z The lower threshold of the crashkernel=auto kernel command-line parameter is now decreased from 4G to 1G on IBM Z systems. This implementation allows the IBM Z to align with the threshold of the AMD64 and Intel 64 systems to share the same reservation policy on the lower threshold of crashkernel=auto . As a result, the crash kernel is able to automatically reserve memory for kdump on systems with less than 4GB RAM. (BZ#1780432) The numactl manual entry clarifies the memory usage output With this release of RHEL 8, the manual page for numactl explicitly mentions that the memory usage information reflects only the resident pages on the system. The reason for this addition is to eliminate potential confusion for users whether the memory usage information relates to resident pages or virtual memory. ( BZ#1730738 ) The kexec-tools document is now updated to include Kdump FCoE target support In this release, the /usr/share/doc/kexec-tools/supported-kdump-targets.txt file has been updated to include Kdump Fibre Channel over Ethernet (FCoE) target support. As a result, users can now have better understanding of the status and details of the kdump crash dumping mechanism on a FCoE target support. (BZ#1690729) Firmware-assisted dump now supports PowerNV Firmware-assisted dump ( fadump ) mechanism is now supported on the PowerNV platform. The feature is supported with the IBM POWER9 FW941 firmware version and later. At the time of system failure, fadump , along with the vmcore file, also exports the opalcore file. 
The opalcore file contains information about the state of OpenPOWER Abstraction Layer (OPAL) memory at the time of breakdown. The opalcore file is helpful in debugging crashes of OPAL-based systems. (BZ#1524687) kernel-rt source tree now matches the latest RHEL 8 tree The kernel-rt sources have been updated to use the latest RHEL kernel source tree. The realtime patch set has also been updated to the latest upstream v5.2.21-rt13 version. Both of these updates provide a number of bug fixes and enhancements. (BZ#1680161) rngd is now able to run with non-root privileges The random number generator daemon ( rngd ) checks whether data supplied by the source of randomness is sufficiently random and then stores the data in the kernel's random-number entropy pool. With this update, rngd is able to run with non-root user privileges to enhance system security. ( BZ#1692435 ) Virtual Persistent Memory now supported for RHEL 8.2 and later on POWER 9 When running a RHEL 8.2 or later host with a PowerVM hypervisor on IBM POWER9 hardware, the host can now use the Virtual Persistent Memory (vPMEM) feature. With vPMEM, data persists across application and partition restarts until the physical server is turned off. As a result, restarting workloads that use vPMEM is significantly faster. The following requirements must be met for your system to be able to use vPMEM: Hardware Management Console (HMC) V9R1 M940 or later Firmware level FW940 or later E980 system firmware FW940 or later L922 system firmware FW940 or later PowerVM level V3.1.1 Note that several known issues currently occur in RHEL 8 with vPMEM. For details, see the following Knowledgebase articles: Hot plug/unplug of pmem memory can cause kernel panic on POWER9 Booting of the capture kernel takes a very long time using vPMEM namespaces as a dump target for kdump/fadump (BZ#1859262) 5.1.8. File systems and storage LVM now supports the dm-writecache caching method LVM cache volumes now provide the dm-writecache caching method in addition to the existing dm-cache method. dm-cache This method speeds up access to frequently used data by caching it on the faster volume. The method caches both read and write operations. dm-writecache This method caches only write operations. The faster volume, usually an SSD or a persistent memory (PMEM) disk, stores the write operations first and then migrates them to the slower disk in the background. To configure the caching method, use the --type cache or --type writecache option with the lvconvert utility. For more information, see Caching logical volumes . (BZ#1600174) VDO async policy is now ACID compliant With this release, the VDO async write mode is now compliant with Atomicity, Consistency, Isolation, Durability (ACID). If the system unexpectedly halts while VDO is writing data in async mode, the recovered data is now always consistent. Due to the ACID compliance, the performance of async is now lower compared to the release. To restore the original performance, you can change the write mode on your VDO volume to async-unsafe mode, which is not ACID compliant. For more information, see Selecting a VDO write mode . (BZ#1657301) You can now import VDO volumes The vdo utility now enables you to import existing VDO volumes that are currently not registered on your system. To import a VDO volume, use the vdo import command. Additionally, you can modify the Universally Unique Identifier (UUID) of a VDO volume using the vdo import command. 
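A minimal sketch of the import workflow mentioned above follows; the volume name and device path are placeholders, and the exact option names should be confirmed with vdo import --help.

# Register an existing, unregistered VDO volume on this system.
vdo import --name=vdo_data --device=/dev/disk/by-id/scsi-example
# The UUID can also be changed at import time; assuming a --uuid option:
# vdo import --name=vdo_data --device=/dev/disk/by-id/scsi-example --uuid="$(uuidgen)"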
( BZ#1713749 ) New per-op error counter is now available in the output of the mountstats and nfsiostat A minor supportability feature is available for the NFS client systems: the output of the mountstats and nfsiostat commands in nfs-utils have a per-op error count. This enhancement allows these tools to display per-op error counts and percentages that can assist in narrowing down problems on specific NFS mount points on an NFS client machine. Note that these new statistics depend on kernel changes that are inside the Red Hat Enterprise Linux 8.2 kernel. ( BZ#1719983 ) Writeback IOs with cgroup awareness is now available in XFS With this release, XFS supports writeback IOs with cgroup awareness. In general, cgroup writeback requires explicit support from the underlying file system. Until now, writeback IOs on XFS was the attribute for the root cgroup only. (BZ#1274406) The FUSE file systems now implement copy_file_range() The copy_file_range() system call provides a way for file systems to implement efficient data copy mechanism. With this update, GlusterFS, which is using the Filesystem in Userspace (FUSE) framework takes advantage of this mechanism. Since read/write functionality of FUSE file systems involves multiple copies of data, using copy_file_range() can significantly improve performance. (BZ#1650518) Support for per-op statistics is now available for the mountstats and nfsiostat commands A support feature is now available for the NFS client systems: the /proc/self/mountstats file has the per-op error counter. With this update, under each per-op statistics row, the ninth number indicates the number of the operations that have been completed with a status value less then zero. This status value indicates an error. For more information, see the updates to the mountstats and nfsiostat programs in the nfs-utils that displays these new error counts. (BZ#1636572) New mount stats lease_time and lease_expired are available in /proc/self/mountstats file A support feature is available for NFSv4.x client systems. The /proc/self/mountstats file has the lease_time and the lease_expired fields at the end of the line starting with nfsv4: . The lease_time field indicates the number of seconds in the NFSv4 lease time. The lease_expired field indicates the number of seconds since the lease has expired, or 0 if the lease has not expired. (BZ#1727369) Surprise removal of NVMe devices With this enhancement, you can surprise remove NVMe devices from the Linux operating system without notifying the operating system beforehand. This will enhance the serviceability of NVMe devices because no additional steps are required to prepare the devices for orderly removal, which ensures the availability of servers by eliminating server downtime. Note the following: Surprise removal of NVMe devices requires kernel-4.18.0-193.13.2.el8_2.x86_64 version or later. Additional requirements from the hardware platform or the software running on the platform might be necessary for successful surprise removal of NVMe devices. Surprise removing an NVMe device that is critical to the system operation is not supported. For example, you cannot remove an NVMe device that contains the operating system or a swap partition. (BZ#1634655) 5.1.9. High availability and clusters New command options to disable a resource only if this would not affect other resources It is sometimes necessary to disable resources only if this would not have an effect on other resources. 
Ensuring that this would be the case can be impossible to do by hand when complex resource relations are set up. To address this need, the pcs resource disable command now supports the following options: pcs resource disable --simulate : show effects of disabling specified resource(s) while not changing the cluster configuration pcs resource disable --safe : disable specified resource(s) only if no other resources would be affected in any way, such as being migrated from one node to another pcs resource disable --safe --no-strict : disable specified resource(s) only if no other resources would be stopped or demoted In addition, the pcs resource safe-disable command has been introduced as an alias for pcs resource disable --safe . ( BZ#1631519 ) New command to show relations between resources The new pcs resource relations command allows you to display the relations between cluster resources in a tree structure. (BZ#1631514) New command to display the status of both a primary site and recovery site cluster If you have configured a cluster to use as a recovery site, you can now configure that cluster as a recovery site cluster with the pcs dr command. You can then use the pcs dr command to display the status of both your primary site cluster and your recovery site cluster from a single node. (BZ#1676431) Expired resource constraints are now hidden by default when listing constraints Listing resource constraints no longer displays expired constraints by default. To include expired constraints, use the --all option of the pcs constraint command. This will list expired constraints, noting the constraints and their associated rules as (expired) in the display. ( BZ#1442116 ) Pacemaker support for configuring resources to remain stopped on clean node shutdown When a cluster node shuts down, Pacemaker's default response is to stop all resources running on that node and recover them elsewhere. Some users prefer to have high availability only for failures, and to treat clean shutdowns as scheduled outages. To address this, Pacemaker now supports the shutdown-lock and shutdown-lock-limit cluster properties to specify that resources active on a node when it shuts down should remain stopped until the node rejoins. Users can now use clean shutdowns as scheduled outages without any manual intervention. For information on configuring resources to remain stopped on a clean node shutdown, see Configuring resources to remain stopped on clean node shutdown . ( BZ#1712584 ) Support for running the cluster environment in a single node A cluster with only one member configured is now able to start and run resources in a cluster environment. This allows a user to configure a separate disaster recovery site for a multi-node cluster that uses a single node for backup. Note that a cluster with only one node is not in itself fault tolerant. (BZ#1700104) 5.1.10. Dynamic programming languages, web and database servers A new module: python38 RHEL 8.2 introduces Python 3.8, provided by the new module python38 and the ubi8/python-38 container image.
Notable enhancements compared to Python 3.6 include: New Python modules, for example, contextvars , dataclasses , or importlib.resources New language features, such as assignment expressions (the so-called walrus operator, := ) or positional-only parameters Improved developer experience with the breakpoint() built-in function, the = format string specification, and compatibility between debug and non-debug builds of Python and extension modules Performance improvements Improved support for optional static type hints An addition of the = specifier to formatted string literals (f-strings) for easier debugging Updated versions of packages, such as pip , requests , or Cython Python 3.8 and packages built for it can be installed in parallel with Python 3.6 on the same system. Note that the python38 module does not include the same binary bindings to system tools (RPM, DNF, SELinux, and others) that are provided for the python36 module. To install packages from the python38 module, use, for example: The python38:3.8 module stream will be enabled automatically. To run the interpreter, use, for example: See Installing and using Python for more information. Note that Red Hat will continue to provide support for Python 3.6 until the end of life of RHEL 8. Python 3.8 will have a shorter life cycle, see RHEL 8 Application Streams Life Cycle . (BZ#1747329) Changes in mod_wsgi installation Previously, when the user tried to install the mod_wsgi module using the yum install mod_wsgi command, the python3-mod_wsgi package was always installed. RHEL 8.2 introduces Python 3.8 as an addition to Python 3.6. With this update, you need to specify which version of mod_wsgi you want to install, otherwise an error message is returned. To install the Python 3.6 version of mod_wsgi : To install the Python 3.8 version of mod_wsgi : Note that the python3-mod_wsgi and python38-mod_wsgi packages conflict with each other, and only one mod_wsgi module can be installed on a system due to a limitation of the Apache HTTP Server. This change introduced a dependency known issue described in BZ#1829692 . (BZ#1779705) Support for hardware-accelerated deflate in zlib on IBM Z This update adds support for a hardware-accelerated deflate algorithm to the zlib library in the IBM Z mainframes. As a result, performance of compression and decompression on IBM Z vector machines has been improved. (BZ#1659433) Performance improved when decompressing gzip on IBM Power Systems, little endian This update adds optimization for the 32-bit Cyclic Redundancy Check (CRC32) to the zlib library on IBM Power Systems, little endian. As a result, performance of decompressing gzip files has been improved. (BZ#1666798) A new module stream: maven:3.6 RHEL 8.2 introduces a new module stream, maven:3.6 . This version of the Maven software project management and comprehension tool provides numerous bug fixes and various enhancements over the maven:3.5 stream distributed with RHEL 8.0. To install the maven:3.6 stream, use: If you want to upgrade from the maven:3.5 stream, see Switching to a later stream . (BZ#1783926) mod_md now supports the ACMEv2 protocol The mod_md module has been updated to version 2.0.8. This update adds a number of features, notably support for version 2 of the Automatic Certificate Management Environment (ACME) certificate issuance and management protocol, which is the Internet Engineering Task Force (IETF) standard (RFC 8555). The original ACMEv1 protocol remains supported but is deprecated by popular service providers. 
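The installation commands referenced above are not reproduced in this extract, so the following sketch shows a typical yum workflow for the streams discussed in this section; the package and stream names are taken from the text, and the invocations follow the usual RHEL 8 module conventions, so double-check them against the linked documentation.

# Install Python 3.8; the python38:3.8 module stream is enabled automatically.
yum install python38
python3.8 --version
# Choose one mod_wsgi build explicitly, because the two packages conflict.
yum install python3-mod_wsgi        # Python 3.6 build
# yum install python38-mod_wsgi     # Python 3.8 build (mutually exclusive)
# Install the maven:3.6 module stream.
yum module install maven:3.6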
(BZ#1747923) New extensions for PHP 7.3 The php:7.3 module stream has been updated to provide two new PHP extensions: rrd and Xdebug . The rrd extension provides bindings to the RRDtool C library. RRDtool is a high performance data logging and graphing system for time series data. The Xdebug extension is included to assist you with debugging and development. Note that the extension is provided only for development purposes and should not be used in production environments. For information about installing and using PHP in RHEL 8, see Using the PHP scripting language . (BZ#1769857, BZ#1764738) New packages: perl-LDAP and perl-Convert-ASN1 This update adds the perl-LDAP and Perl-Convert-ASN1 packages to RHEL 8. The perl-LDAP package provides an LDAP client for the Perl language. perl-LDAP requires the perl-Convert-ASN1 package, which encodes and decodes Abstract Syntax Notation One (ASN.1) data structures using Basic Encoding Rules (BER) and Distinguished Encoding Rules (DER). ( BZ#1663063 , BZ#1746898 ) sscg now supports generating private key files protected by a password The sscg utility is now able to generate private key files protected by a password. This adds another level of protection for private keys, and it is required by some services, such as FreeRADIUS. ( BZ#1717880 ) 5.1.11. Compilers and development tools grafana rebased to version 6.3.6 The grafana package has been upgraded to version 6.3.6, which provides multiple bug fixes and enhancements. Notable changes include: Database: Rewrites system statistics query for better performance. Explore: Fixes query field layout in split view for the Safari browsers. Adds Live option for the supported data sources, adds the orgId to URL for sharing purposes. Adds support for the new loki start and end parameters for labels endpoint. Adds support for toggling raw query mode in the Explore, allow switching between metrics and logs. Displays log lines context, does not parse log levels if provided by field or label. Supports new LogQL filtering syntax. Uses new TimePicker from Grafana/UI. Handles newlines in the LogRow Highlighter. Fixes browsing back to the dashboard panel. Fixes filter by series level in logs graph. Fix issues when loading and graph/table are collapsed. Fixes the selection/copy of log lines. Dashboard: Fixes dashboards init failed loading error for dashboards with panel links that had missing properties, and fixes timezone dashboard setting while exporting to the comma-separated values (CSV) Data links. Editor: Fixes issue where only entire lines were being copied. LDAP: Integration of the multi ldap and ldap authentication components. Profile/UserAdmin: Fixes user agent parser crashing the grafana-server on 32-bit builds. Prometheus: Prevents panel editor crash while switching to the Prometheus data source, changes brace-insertion behaviour to be less annoying. Fixes queries with the label_replace and removes the USD1 match when loading the query editor. Consistently allows multi-line queries in the editor, taking timezone into account for the step alignment. Uses the overridden panel range for USD__range instead of the dashboard range. Adds time range filter to series labels query, escapes | literals in the interpolated PromQL variables. Fixes while adding labels for metrics which contain colons in the Explore. Auth: Allows expiration of the API keys, returns device, os and browser while listing user auth tokens in HTTP API, supports list and revoke of user auth tokens in UI. 
DataLinks: Correctly applies scoped variables to the data links, follows timezone while displaying datapoint timestamp in the graph context menu, uses datapoint timestamp correctly when interpolating the variables, fixes the incorrect interpolation of the USD{__series_name} . Graph: Fixes legend issue clicking on series line icon and issue with horizontal scrollbar being visible on windows, adds new fill gradient option. Graphite: Avoids the glob of single-value array variables, fixes issues with alias function being moved last, fixes issue with the seriesByTag & function with variable parameter, uses POST for /metrics/find requests. TimeSeries: Assumes values are all numbers. Gauge/BarGauge: Fixes issue with lost thresholds and an issue loading Gauge with the avg stat. PanelLinks: Fixes crash issue with Gauge & Bar Gauge panels with panel links (drill down links), fixes render issue while there is no panel description. OAuth: Fixes the missing saved state OAuth login failure due to SameSite cookie policy, fixes for wrong user token updated on the OAuth refresh in DS proxy. Auth Proxy: Includes additional headers as a part of the cache key. cli : Fix for recognizing when in dev mode, fixes the issue of encrypt-datasource-passwords failing with the sql error. Permissions: Show plugins in the navigation for non admin users but hides plugin configuration. TimePicker: Increases max height of quick range dropdown and fixes style issue for custom range popover. Loki: Displays live tailed logs in correct order in the Explore. Timerange: Fixes a bug where custom time ranges were not following the Universal Time Coordinated (UTC). remote_cache : Fixes the redis connstr parsing. Alerting: Add tags to alert rules, attempts to send email notifications to all the given email addresses, improves alert rule testing, support for configuring the content field for the Discord alert notifier. Alertmanager: Replaces illegal characters with underscore in the label names. AzureMonitor: Changes clashing built-in Grafana variables or macro names for the Azure Logs. CloudWatch: Made region visible for Amazon Web Services (AWS) Cloudwatch Expressions, adds the AWS DocDB metrics. GraphPanel: Do not sort series when legend table and sort column is not visible. InfluxDB: Supports visualizing logs in the Explore. MySQL/Postgres/MSSQL: Adds parsing for day, weeks, and year intervals in macros, adds support for periodically reloading client certs. Plugins: Replaces the dataFormats list with the skipDataQuery flag in the plugin.json file. Refresh picker: Handles empty intervals. Singlestat: Add y min/max configuration to the singlestat sparklines. Templating: Correctly displays the __text in the multi-value variable after page reloads, supports selecting all the filtered values of a multi-value variable. Frontend: Fixes Json tree component not working issue. InfluxDB: Fixes issues with single quotes not escaped in the label value filters. Config: Fixes the connectionstring option for the remote_cache in the defaults.ini file. Elasticsearch: Fixes the empty query (via template variable) should be sent as wildcard, fixes the default max concurrent shard requests, supports visualizing logs in the Explore. TablePanel: Fixes the annotations display. Grafana-CLI: Fixes receiving flags via command line, wrapper for the grafana-cli within the RPM/DEB packages and config/homepath are now global flags. 
HTTPServer: Fixes the X-XSS-Protection header formatting, options for returning new headers X-Content-Type-Options , X-XSS-Protection and Strict-Transport-Security , fixes the Strict-Transport-Security header, serves Grafana with a custom URL path prefix. ( BZ#1725278 ) pcp rebased to version 5.0.2 The pcp package has been upgraded to version 5.0.2, which provides multiple bug fixes and enhancements. Notable changes include: The pcp-webapp-* packages are now replaced by the grafana-pcp package and pmproxy . The pcp-collectl tool is now replaced by the pmrep configurations. New and improved performance metric domain agents (PMDAs): pmdamssql : New PMDA for Microsoft SQL Server implementation. pmdanetcheck : New PMDA to perform network checks. pmdaopenmetrics : Renames prometheus agent to openmetrics . pmdanfsclient : Adds the per-op and per-mount rpc error metrics. pmdalmsensors : Improvements in the name parsing and error handling. pmdaperfevent : Supports hv_24x7 nest events on the multi-node system. pmdalinux : Correctly handles sparse or discontinuous numa nodes. Uses cpu instname and not the instid for per-cpu numa stats. Adds an active and total slabs to slabinfo v2 parsing Fixes several unix socket, icmp6 metrics, hugepage metric value. calculations, segfault in interrupts code with large CPU counts Fetches more network metrics in the --container namespace. pmdabcc : Fixes the tracepoints module for the bcc 0.10.0 and higher versions pmdabpftrace : New PMDA for metrics from the bpftrace scripts pmdaproc : Fixes memory leak in the pidlist refresh. Avoids excessive stat calls in cgroups_scan . Retains cgroup paths and only un-escape instance names. pmdaroot : Improves handling of cached or inactive the cgroup behaviour and refreshes the container indom on cgroup fs change as well. Fixes to collector (server) tools: pmproxy : Openmetrics support via the /metrics endpoint, consolidates the pmseries/grafana REST API, and adds new async PMWEBAPI(3) REST API implementation. selinux : Numerous pcp policy updates. python pmdas : Enables authentication support, new set_comm_flags method to set the communication flags. python api : Exports the pmdaGetContext() and adds debugging wrapper. perl api : Ensures context set up for PMDA store as with python wrapper. systemd : Adds 120s timeout in all the services and fixes failure to start the pmlogger service. Fixes to analysis (client) tools: pmchart : Fixes chart auto-scaling under fetch error conditions. pmrep : Fixes the wait.formula for collectl-dm-sD and collectl-sD . pmseries : Provides support for the delta keyword and better timestamps. pcp-atop : Fixes the write mode ( -w ) to handle the proc vs hotproc metrics. pcp-atopsar : Fixes the mishandling of a few command line arguments. pcp-dstat : Fixes misaligned headers in CSV output and handling of the --bits command line option. libpcp : Fixes the cockpit-pcp segv with local context and multi-archive replay error handling for the corrupted archive(s). ( BZ#1723598 ) grafana-pcp is now available in RHEL 8.2 The grafana-pcp package provides new grafana data sources and application plugins connecting PCP with grafana . With the grafana-pcp package, you can analyze historical PCP metrics and real-time PCP metrics using the pmseries query language and pmwebapi live services respectively. For more information, see Performance Co-Pilot Grafana Plugin . (BZ#1685315) Updated GCC Toolset 9 GCC Toolset 9 is a compiler toolset that provides recent versions of development tools. 
It is available as an Application Stream in the form of a Software Collection in the AppStream repository. Notable changes introduced with RHEL 8.2 include: The GCC compiler has been updated to version 9.2.1, which provides many bug fixes and enhancements that are available in upstream GCC. The GCC Toolset 9 components are now available in the two container images: rhel8/gcc-toolset-9-toolchain , which includes the GCC compiler, the GDB debugger, and the make automation tool. rhel8/gcc-toolset-9-perftools , which includes the performance monitoring tools, such as SystemTap and Valgrind. To pull a container image, run the following command as root: The following tools and versions are provided by GCC Toolset 9: Tool Version GCC 9.2.1 GDB 8.3 Valgrind 3.15.0 SystemTap 4.1 Dyninst 10.1.0 binutils 2.32 elfutils 0.176 dwz 0.12 make 4.2.1 strace 5.1 ltrace 0.7.91 annobin 9.08 To install GCC Toolset 9, run the following command as root: To run a tool from GCC Toolset 9: To run a shell session where tool versions from GCC Toolset 9 take precedence over system versions of these tools: For more information, see Using GCC Toolset . ( BZ#1789401 ) GCC Toolset 9 now supports NVIDIA PTX target offloading The GCC compiler in GCC Toolset 9 now supports OpenMP target offloading for NVIDIA PTX. (BZ#1698607) The updated GCC compiler is now available for RHEL 8.2 The system GCC compiler, version 8.3.1, has been updated to include numerous bug fixes and enhancements available in the upstream GCC. The GNU Compiler Collection (GCC) provides tools for developing applications with the C, C++, and Fortran programming languages. For usage information, see Developing C and C++ applications in RHEL 8 . (BZ#1747157) A new tunable for changing the maximum fastbin size in glibc The malloc function uses a series of fastbins that hold reusable memory chunks up to a specific size. The default maximum chunk size is 80 bytes on 32-bit systems and 160 bytes on 64-bit systems. This enhancement introduces a new glibc.malloc.mxfast tunable to glibc that enables you to change the maximum fastbin size. ( BZ#1764218 ) Vectorized math library is now enabled for GNU Fortran in GCC Toolset 9 With this enhancement, GNU Fortran from GCC Toolset can now use routines from the vectorized math library libmvec . Previously, the Fortran compiler in GCC Toolset needed a Fortran header file before it could use routines from libmvec provided by the GNU C Library glibc . ( BZ#1764238 ) The glibc.malloc.tcache tunable has been enhanced The glibc.malloc.tcache_count tunable allows to set the maximum number of memory chunks of each size that can be stored in the per-thread cache (tcache). With this update, the upper limit of the glibc.malloc.tcache_count tunable has been increased from 127 to 65535. ( BZ#1746933 ) The glibc dynamic loader is enhanced to provide a non-inheriting library preloading mechanism With this enhancement, the loader can now be invoked to load a user program with a --preload option followed by a colon-separated list of libraries to preload. This feature allows users to invoke their programs directly through the loader with a non-inheriting library preload list. Previously, users had to use the LD_PRELOAD environment variable which was inherited by all child processes through their environment. ( BZ#1747453 ) GDB now supports the ARCH(13) extension on the IBM Z architecture With this enhancement, the GNU Debugger (GDB) now supports the new instructions implemented by the ARCH(13) extension on the IBM Z architecture. 
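Stepping back to the glibc changes earlier in this section, the new tunables and the dynamic loader's --preload option are both driven from the command line; a minimal sketch, with the program and library names as placeholders:

# Raise the fastbin limit and the per-thread cache depth for a single run.
GLIBC_TUNABLES=glibc.malloc.mxfast=128:glibc.malloc.tcache_count=2000 ./myapp
# Preload libraries for one program only, without exporting LD_PRELOAD to
# every child process (loader path shown for x86_64).
/lib64/ld-linux-x86-64.so.2 --preload "libtrace.so:libshim.so" ./myapp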
( BZ#1768593 ) elfutils rebased to version 0.178 The elfutils package has been upgraded to version 0.178, which provides multiple bug fixes and enhancements. Notable changes include: elfclassify : a new tool to analyze ELF objects. debuginfod : a new server, client tool, and library to index and automatically fetch ELF, DWARF, and source from files and RPM archives through HTTP. libebl is now directly compiled into libdw.so . eu-readelf has multiple new flags for notes, section numbering, and symbol tables. libdw has improved multithreading support. libdw supports additional GNU DWARF extensions. ( BZ#1744992 ) SystemTap rebased to version 4.2 The SystemTap instrumentation tool has been updated to version 4.2. Notable enhancements include: Backtraces can now include source file names and line numbers. Numerous Berkeley Packet Filter (BPF) back-end extensions are now available, for example, for looping, timing, and other processes. A new service for managing SystemTap scripts is available. This service sends metrics to a Prometheus-compatible monitoring system. SystemTap has inherited functionality of a new HTTP file server for elfutils called debuginfod . This server automatically sends debugging resources to SystemTap. ( BZ#1744989 ) Enhancements to IBM Z series performance counters IBM Z series type 0x8561, 0x8562, and 0x3907 (z14 ZR1) machines are now recognized by libpfm . Performance events for monitoring elliptic-curve cryptography (ECC) operations on IBM Z series are now available. This allows monitoring of additional subsystems on IBM Z series machines. (BZ#1731019) Rust Toolset rebased to version 1.41 Rust Toolset has been updated to version 1.41. Notable changes include: Implementing new traits is now easier because the orphan rule is less strict. You can now attach the #[non_exhaustive] attribute to a struct , an enum , or enum variants. Using Box<T> in the Foreign Function Interface (FFI) has more guarantees now. Box<T> will have the same Application Binary Interface (ABI) as a T* pointer in the FFI. Rust is supposed to detect memory-safety bugs at compile time, but the borrow checker had limitations and allowed undefined behaviour and memory unsafety. The new non-lexical lifetimes (NLL) borrow checker can report memory unsafety problems as hard errors. It now applies to the Rust 2015 and Rust 2018 editions. Previously, in Rust 2015 the NLL borrow checker only raised warnings about such problems. To install the rust-toolset module, run the following command as root: For usage information, see Using Rust Toolset . (BZ#1776847) LLVM Toolset rebased to version 9.0.1 LLVM Toolset has been upgraded to version 9.0.1. With this update, the asm goto statements are now supported. This change allows to compile the Linux kernel on the AMD64 and Intel 64 architectures. To install the llvm-toolset module, run the following command as root: For more information, see Using LLVM Toolset . (BZ#1747139) Go Toolset rebased to version 1.13 Go Toolset has been upgraded to version 1.13. Notable enhancements include: Go can now use a FIPS-certified cryptographic module when the RHEL system is booted in the FIPS mode. Users can enable this mode manually using the GOLANG_FIPS=1 environment variable. The Delve debugger, version 1.3.2, is now available for Go. It is a source-level debugger for the Go ( golang ) programming language. 
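The installation and debugging steps referenced in the next paragraph look roughly like the following. This is a sketch: the go-toolset module name, the delve package name, and the dlv binary are the names documented for RHEL 8, but treat them as assumptions for your environment.

# Install the Go toolchain and the Delve debugger:
yum module install go-toolset
yum install delve

# Start a source-level debugging session for a small program:
dlv debug helloworld.go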
To install the go-toolset module, run the following command as root: To install the Delve debugger, run the following command as root: To debug a helloworld.go program using Delve, run the following command: For more information on Go Toolset, see Using Go Toolset . For more information on Delve, see the upstream Delve documentation . (BZ#1747150) OpenJDK now supports also secp256k1 Previously, Open Java Development Kit (OpenJDK) could use only curves from the NSS library. Consequently, OpenJDK provided only the secp256r1, secp384r1, and secp521r1 curves for elliptic curve cryptography (ECC). With this update, OpenJDK uses the internal ECC implementation and supports also the secp256k1 curve. ( BZ#1746875 , BZ#1746879 ) 5.1.12. Identity Management IdM now supports new Ansible management modules This update introduces several ansible-freeipa modules for automating common Identity Management (IdM) tasks using Ansible playbooks: The ipauser module automates adding and removing users. The ipagroup module automates adding and removing users and user groups to and from user groups. The ipahost module automates adding and removing hosts. The ipahostgroup module automates adding and removing hosts and host groups to and from host groups. The ipasudorule module automates the management of sudo command and sudo rule. The ipapwpolicy module automates the configuration of password policies in IdM. The ipahbacrule module automates the management of host-based access control in IdM. Note that you can combine two or more ipauser calls into one with the users variable or, alternatively, use a JSON file containing the users. Similarly, you can combine two or more ipahost calls into one with the hosts variable or, alternatively, use a JSON file containing the hosts. The ipahost module can also ensure the presence or absence of several IPv4 and IPv6 addresses for a host. (JIRA:RHELPLAN-37713) IdM Healthcheck now supports screening DNS records This update introduces a standalone manual test of DNS records on an Identity Management (IdM) server. The test uses the Healthcheck tool and performs a DNS query using the local resolver in the etc/resolv.conf file. The test ensures that the expected DNS records required for autodiscovery are resolvable. (JIRA:RHELPLAN-37777) Direct integration of RHEL into AD using SSSD now supports FIPS With this enhancement, the System Services Security Daemon (SSSD) now integrates with Active Directory (AD) deployments whose authentication mechanisms use encryption types that were approved by the Federal Information Processing Standard (FIPS). The enhancement enables you to directly integrate RHEL systems into AD in environments that must meet the FIPS criteria. ( BZ#1841170 ) The SMB1 protocol has been disabled in the Samba server and client utilities by default In Samba 4.11, the default values of the server min protocol and client min protocol parameters have been changed from NT1 to SMB2_02 because the server message block version 1 (SMB1) protocol is deprecated. If you have not set these parameters in the /etc/samba/smb.conf file: Clients that only support SMB1 are no longer able to connect to the Samba server. Samba client utilities, such as smbclient , and the libsmbclient library fail to connect to servers that only support SMB1. Red Hat recommends to not use the SMB1 protocol. However, if your environment requires SMB1, you can manually re-enable the protocol. 
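In outline, the re-enablement described in the next entry comes down to setting the minimum protocol back to NT1 and restarting the service. A sketch, using the server min protocol and client min protocol parameter names mentioned below:

# /etc/samba/smb.conf
[global]
# Re-enable SMB1 for the Samba server:
server min protocol = NT1
# Re-enable SMB1 for Samba client utilities and the libsmbclient library:
client min protocol = NT1

# Apply the server-side change:
systemctl restart smb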
To re-enable SMB1 on a Samba server:
Add the following setting to the /etc/samba/smb.conf file:
Restart the smb service:
To re-enable SMB1 for Samba client utilities and the libsmbclient library:
Add the following setting to the /etc/samba/smb.conf file:
Restart the smb service:
Note that the SMB1 protocol will be removed in a future Samba release. ( BZ#1785248 )

samba rebased to version 4.11.2
The samba packages have been upgraded to upstream version 4.11.2, which provides a number of bug fixes and enhancements over the previous version. Notable changes include:
By default, the server message block version 1 (SMB1) protocol is now disabled in the Samba server, client utilities, and the libsmbclient library. However, you can still set the server min protocol and client min protocol parameters manually to NT1 to re-enable SMB1. Red Hat does not recommend re-enabling the SMB1 protocol.
The lanman auth and encrypt passwords parameters are deprecated. These parameters enable insecure authentication and are only available in the deprecated SMB1 protocol.
The -o parameter has been removed from the onnode clustered trivial database (CTDB) utility.
Samba now uses the GnuTLS library for encryption. As a result, if FIPS mode is enabled in RHEL, Samba is compliant with the FIPS standard.
The ctdbd service now logs when it uses more than 90% of a CPU thread.
The deprecated Python 2 support has been removed.
Samba automatically updates its tdb database files when the smbd, nmbd, or winbind service starts. Back up the database files before starting Samba. Note that Red Hat does not support downgrading tdb database files.
For further information about notable changes, read the upstream release notes before updating: https://www.samba.org/samba/history/samba-4.11.0.html ( BZ#1754409 )

Directory Server rebased to version 1.4.2.4
The 389-ds-base packages have been upgraded to upstream version 1.4.2.4, which provides a number of bug fixes and enhancements over the previous version. For a complete list of notable changes, read the archived upstream release notes before updating. The 389 Directory Server Release Notes archive includes release notes for the following releases:
* 389 Directory Server 1.4.2.4
* 389 Directory Server 1.4.2.3
* 389 Directory Server 1.4.2.2
* 389 Directory Server 1.4.2.1
( BZ#1748994 )

Certain legacy scripts have been replaced in Directory Server
This enhancement provides replacements for the unsupported dbverify, validate-syntax.pl, cl-dump.pl, fixup-memberuid.pl, and repl-monitor.pl legacy scripts in Directory Server. These scripts have been replaced with the following commands:
dbverify: dsctl instance_name dbverify
validate-syntax.pl: dsconf schema validate-syntax
cl-dump.pl: dsconf replication dump-changelog
fixup-memberuid.pl: dsconf plugin posix-winsync fixup
repl-monitor.pl: dsconf replication monitor
For a list of all legacy scripts and their replacements, see Command-line utilities replaced in Red Hat Directory Server 11. ( BZ#1739718 )

Setting up IdM as a hidden replica is now fully supported
Identity Management (IdM) in RHEL 8.2 fully supports setting up IdM servers as hidden replicas. A hidden replica is an IdM server that has all services running and available. However, it is not advertised to other clients or masters because no SRV records exist for the services in DNS, and LDAP server roles are not enabled. Therefore, clients cannot use service discovery to detect hidden replicas.
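A minimal sketch of the hidden replica workflow discussed here. The ipa-replica-install option is the one named in the next paragraph; the ipa server-state invocation and the host name are assumptions and should be checked against the IdM documentation:

# Install a replica that is not advertised through DNS SRV records or LDAP server roles:
ipa-replica-install --hidden-replica

# Later, promote the hidden replica so that clients can discover it (assumed syntax and example host name):
ipa server-state replica.idm.example.com --state=enabled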
Hidden replicas are primarily designed for dedicated services that can otherwise disrupt clients. For example, a full backup of IdM requires to shut down all IdM services on the master or replica. Since no clients use a hidden replica, administrators can temporarily shut down the services on this host without affecting any clients. Other use cases include high-load operations on the IdM API or the LDAP server, such as a mass import or extensive queries. To install a new hidden replica, use the ipa-replica-install --hidden-replica command. To change the state of an existing replica, use the ipa server-state command. For further details, see Installing an IdM hidden replica . ( BZ#1719767 ) Kerberos ticket policy now supports authentication indicators Authentication indicators are attached to Kerberos tickets based on which pre-authentication mechanism has been used to acquire the ticket: otp for two-factor authentication (password + OTP) radius for RADIUS authentication pkinit for PKINIT, smart card or certificate authentication hardened for hardened passwords (SPAKE or FAST) The Kerberos Distribution Center (KDC) can enforce policies such as service access control, maximum ticket lifetime, and maximum renewable age, on the service ticket requests which are based on the authentication indicators. With this enhancement, administrators can achieve finer control over service ticket issuance by requiring specific authentication indicators from a user's tickets. ( BZ#1777564 ) The krb5 package is now FIPS-compliant With this enhancement, non-compliant cryptography is prohibited. As a result, administrators can use Kerberos in FIPS-regulated environments. (BZ#1754690) Directory Server sets the sslVersionMin parameter based on the system-wide crypto policy By default, Directory Server now sets the value of the sslVersionMin parameter based on the system-wide crypto policy. If you set the crypto policy profile in the /etc/crypto-policies/config file to: DEFAULT , FUTURE , or FIPS , Directory Server sets sslVersionMin to TLS1.2 LEGACY , Directory Server sets sslVersionMin to TLS1.0 Alternatively, you can manually set sslVersionMin to higher value than the one defined in the crypto policy: (BZ#1828727) SSSD now enforces AD GPOs by default The default setting for the SSSD option ad_gpo_access_control is now enforcing . In RHEL 8, SSSD enforces access control rules based on Active Directory Group Policy Objects (GPOs) by default. Red Hat recommends ensuring GPOs are configured correctly in Active Directory before upgrading from RHEL 7 to RHEL 8. If you would not like to enforce GPOs, change the value of the ad_gpo_access_control option in the /etc/sssd/sssd.conf file to permissive . (JIRA:RHELPLAN-51289) 5.1.13. Desktop Wayland is now enabled on dual-GPU systems Previously, the GNOME environment defaulted to the X11 session on laptops and other systems that have two graphical processing units (GPUs). With this release, GNOME now defaults to the Wayland session on dual-GPU systems, which is the same behavior as on single-GPU systems. (BZ#1749960) 5.1.14. Graphics infrastructures Support for new graphics cards The following graphics cards are now supported: Intel HD Graphics 610, 620, and 630, which are found with the Intel Comet Lake H and U processors Intel Ice Lake UHD Graphics 910 and Iris Plus Graphics 930, 940, and 950. You no longer need to set the alpha_support kernel option to enable support for Intel Ice Lake graphics. 
The AMD Navi 10 family, which includes the following models: Radeon RX 5600 Radeon RX 5600 XT Radeon RX 5700 Radeon RX 5700 XT Radeon Pro W5700 The Nvidia Turing TU116 family, which includes the following models. Note that the nouveau graphics driver does not yet support 3D acceleration with the Nvidia Turing TU116 family. GeForce GTX 1650 Super GeForce GTX 1660 GeForce GTX 1660 Super GeForce GTX 1660 Ti GeForce GTX 1660 Ti Max-Q Additionally, the following graphics drivers have been updated: The Matrox mgag2000 driver The Aspeed ast driver The Intel i915 driver (JIRA:RHELPLAN-41384) 5.1.15. The web console Administrators can now use client certificates to authenticate to the RHEL 8 web console With this web console enhancement, a system administrator can use client certificates to access a RHEL 8 system locally or remotely using a browser with certificate authentication built in. No additional client software is required. These certificates are commonly provided by a smart card or Yubikey, or can be imported into the browser. When logging in with a certificate, the user cannot currently perform administrative actions in the web console. But the user can perform them on the Terminal page with the sudo command after authenticating with a password. (JIRA:RHELPLAN-2507) Option to log in to the web console with a TLS client certificate With this update, it is possible to configure the web console to log in with a TLS client certificate that is provided by a browser or a device such as a smart card or a YubiKey. ( BZ#1678465 ) Changes to web console login RHEL web console has been updated with the following changes: The web console will automatically log you out of your current session after 15 minutes of inactivity. You can configure the timeout in minutes in the /etc/cockpit/cockpit.conf file. Similarly to SSH, the web console can now optionally show the content of banner files on the login screen. Users need to configure the functionality in the /etc/cockpit/cockpit.conf file. See the cockpit.conf(5) manual page for more information. ( BZ#1754163 ) The RHEL web console has been redesigned to use the PatternFly 4 user interface design system The new design provides better accessibility and matches the design of OpenShift 4. Updates include: The Overview page has been completely redesigned. For example, information is grouped into easier-to-understand panels, health information is more prominent, resource graphs have been moved to their own page, and the hardware information page is now easier to find. Users can use the new Search field in the Navigation menu to easily find specific pages that are based on keywords. For more information about PatternFly, see the PatternFly project page. ( BZ#1784455 ) Virtual Machines page updates The web console's Virtual Machines page got several storage improvements: Storage volume creation now works for all libvirt-supported types. Storage pools can be created on LVM or iSCSI. Additionally, the Virtual Machines page now supports the creation and removal of virtual network interfaces. ( BZ#1676506 , BZ#1672753 ) Web console Storage page updates Usability testing showed that the default mount point concept on the RHEL web console Storage page was hard to grasp, and led to a lot of confusion. With this update, the web console no longer offers a Default choice when mounting a file system. Creating a new file system now always requires a specified mount point. 
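Tying together the web console login changes described earlier in this section, both the idle timeout and the optional login banner are configured in the /etc/cockpit/cockpit.conf file. A sketch, assuming the IdleTimeout and Banner option names from cockpit.conf(5) and an example banner file path:

# /etc/cockpit/cockpit.conf
[Session]
# Log idle sessions out after 15 minutes:
IdleTimeout=15
# Show the contents of this file on the login screen:
Banner=/etc/issue.cockpit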
Additionally, the web console now hides the distinction between the configuration ( /etc/fstab ) and the run-time state ( /proc/mounts ). Changes made in the web console always apply to both the configuration and the run-time state. When the configuration and the run-time state differ from each other, the web console shows a warning, and enable users to easily bring them back in sync. ( BZ#1784456 ) 5.1.16. Virtualization Attempting to create a RHEL virtual machine from an install tree now returns a more helpful error message. RHEL 7 and RHEL 8 virtual machines created using the virt-install utility with the --location option in some cases fail to boot. This update adds a virt-install error message that provides instructions on how to work around this problem. (BZ#1677019) Intel Xeon Platinum 9200 series processors supported on KVM guests Support for Intel Xeon Platinum 9200 series processors (previously known as Cascade Lake ) has now been added to the KVM hypervisor and kernel code, and to the libvirt API. This enables KVM virtual machines to use Intel Xeon Platinum 9200 series processors. (JIRA:RHELPLAN-13995) EDK2 rebased to version stable201908 The EDK2 package has been upgraded to version stable201908 , which provides multiple enhancements. Notably: EDK2 now includes support for OpenSSL-1.1.1. To comply with the upstream project's licensing requirements, the EDK2 package license has been changed from BSD and OpenSSL and MIT to BSD-2-Clause-Patent and OpenSSL and MIT . ( BZ#1748180 ) Creating nested virtual machines With this update, nested virtualization is fully supported for KVM virtual machines (VMs) running on an Intel 64 host with RHEL 8. With this feature, a RHEL 7 or RHEL 8 VM that runs on a physical RHEL 8 host can act as a hypervisor, and host its own VMs. Note that on AMD64 systems, nested KVM virtualization remains a Technology Preview. (JIRA:RHELPLAN-14047, JIRA:RHELPLAN-24437) 5.1.17. Containers The default registries search list in /etc/containers/registries.conf has been updated The default registries.search list in /etc/containers/registries.conf has been updated to only include trusted registries that provide container images curated, patched, and maintained by Red Hat and its partners. Red Hat recommends always using fully qualified image names including: The registry server (full DNS name) Namespace Image name Tag (for example registry.redhat.io/ubi8/ubu:latest ) When using short names, there is always an inherent risk of spoofing For example, a user wants to pull an image named foobar from a registry and expects it to come from myregistry.com . If myregistry.com is not first in the search list, an attacker could place a different foobar image at a registry earlier in the search list. The user would accidentally pull and run the attacker image and code rather than the intended content. Red Hat recommends only adding registries which are trusted, that is registries which do not allow unknown or anonymous users to create accounts with arbitrary names. This prevents an image from being spoofed, squatted or otherwise made insecure. ( BZ#1810053 ) Podman no longer depends on oci-systemd-hook Podman does not need or depend on the oci-systemd-hook package which has been removed from the container-tools:rhel8 and container-tools:2.0 module streams. (BZ#1645280) 5.2. Important changes to external kernel parameters This chapter provides system administrators with a summary of significant changes in the kernel distributed with Red Hat Enterprise Linux 8.2. 
These changes include added or updated proc entries, sysctl , and sysfs default values, boot parameters, kernel configuration options, or any noticeable behavior changes. 5.2.1. New kernel parameters cpuidle.governor = [CPU_IDLE] Name of the cpuidle governor to use. deferred_probe_timeout = [KNL] This is a debugging parameter for setting a timeout in seconds for the deferred probe to give up waiting on dependencies to probe. Only specific dependencies (subsystems or drivers) that have opted in will be ignored. A timeout of 0 will timeout at the end of initcalls . This parameter will also dump out devices still on the deferred probe list after retrying. kvm.nx_huge_pages = [KVM] This parameter controls the software workaround for the X86_BUG_ITLB_MULTIHIT bug. The options are: force - Always deploy workaround. off - Never deploy workaround. auto (default) - Deploy workaround based on the presence of X86_BUG_ITLB_MULTIHIT . If the software workaround is enabled for the host, guests do not need to enable it for nested guests. kvm.nx_huge_pages_recovery_ratio = [KVM] This parameter controls how many 4KiB pages are periodically zapped back to huge pages. 0 disables the recovery, otherwise if the value is N, Kernel-based Virtual Machine (KVM) will zap 1/Nth of the 4KiB pages every minute. The default is 60. page_alloc.shuffle = [KNL] Boolean flag to control whether the page allocator should randomize its free lists. The randomization may be automatically enabled if the kernel detects it is running on a platform with a direct-mapped memory-side cache. This parameter can be used to override/disable that behavior. The state of the flag can be read from the sysfs pseudo filesystem from the /sys/module/page_alloc/parameters/shuffle file. panic_print = Bitmask for printing system info when panic happens. The user can chose combination of the following bits: bit 0: print all tasks info bit 1: print system memory info bit 2: print timer info bit 3: print locks info if the CONFIG_LOCKDEP kernel configuration is on bit 4: print the ftrace buffer bit 5: print all printk messages in buffer rcutree.sysrq_rcu = [KNL] Commandeer a sysrq key to dump out Tree RCU's rcu_node tree with an eye towards determining why a new grace period has not yet started. rcutorture.fwd_progress = [KNL] Enable Read-copy update (RCU) grace-period forward-progress testing for the types of RCU supporting this notion. rcutorture.fwd_progress_div = [KNL] Specify the fraction of a CPU-stall-warning period to do tight-loop forward-progress testing. rcutorture.fwd_progress_holdoff = [KNL] Number of seconds to wait between successive forward-progress tests. rcutorture.fwd_progress_need_resched = [KNL] Enclose cond_resched() calls within checks for need_resched() during tight-loop forward-progress testing. tsx = [X86] This parameter controls the Transactional Synchronization Extensions (TSX) feature in Intel processors that support TSX control. The options are: on - Enable TSX on the system. Although there are mitigations for all known security vulnerabilities, TSX accelerated several speculation-related CVEs. As a result, there may be unknown security risks associated with leaving it enabled. off - Disable TSX on the system. This option takes effect only on newer CPUs which are not vulnerable to Microarchitectural Data Sampling (MDS). In other words they have MSR_IA32_ARCH_CAPABILITIES.MDS_NO=1 and get the new IA32_TSX_CTRL Model-specific register (MSR) through a microcode update. 
This new MSR allows for a reliable deactivation of the TSX functionality. auto - Disable TSX if X86_BUG_TAA is present, otherwise enable TSX on the system. Not specifying this parameter is equivalent to tsx=off . For details see the upstream kernel documentation . tsx_async_abort = [X86,INTEL] This parameter controls mitigation for the TSX Async Abort (TAA) vulnerability. Similar to Micro-architectural Data Sampling (MDS), certain CPUs that support Transactional Synchronization Extensions (TSX) are vulnerable to an exploit against CPU internal buffers. The exploit is able to forward information to a disclosure gadget under certain conditions. In vulnerable processors, the speculatively forwarded data can be used in a cache side channel attack, to access data to which the attacker does not have direct access. The options are: full - Enable TAA mitigation on vulnerable CPUs if TSX is enabled. full,nosmt - Enable TAA mitigation and disable Simultaneous Multi Threading (SMT) on vulnerable CPUs. If TSX is disabled, SMT is not disabled because CPU is not vulnerable to cross-thread TAA attacks. off - Unconditionally disable TAA mitigation. On MDS-affected machines, the tsx_async_abort=off parameter can be prevented by an active MDS mitigation as both vulnerabilities are mitigated with the same mechanism. Therefore, to disable this mitigation, you need to specify the mds=off parameter as well. Not specifying this option is equivalent to tsx_async_abort=full . On CPUs which are MDS affected and deploy MDS mitigation, TAA mitigation is not required and does not provide any additional mitigation. For details see the upstream kernel documentation . 5.2.2. Updated kernel parameters intel_iommu = [DMAR] Intel IOMMU driver Direct Memory Access Remapping (DMAR). The options are: sm_on [Default Off] - By default, scalable mode will be disabled even if the hardware advertises that it has support for the scalable mode translation. With this option set, scalable mode will be used on hardware which claims to support it. isolcpus = [KNL,SMP,ISOL] This parameter isolates a given set of CPUs from disturbance. managed_irq - A sub-parameter, which prevents the isolated CPUs from being targeted by managed interrupts, which have an interrupt mask containing isolated CPUs. The affinity of managed interrupts is handled by the kernel and cannot be changed via the /proc/irq/* interfaces. This isolation is the best effort and is only effective if the automatically assigned interrupt mask of a device queue contains isolated and housekeeping CPUs. If the housekeeping CPUs are online then such interrupts are directed to the housekeeping CPU so that I/O submitted on the housekeeping CPU cannot disturb the isolated CPU. If the queue's affinity mask contains only isolated CPUs then this parameter has no effect on the interrupt routing decision. However the interrupts are only delivered when the tasks running on those isolated CPUs submit I/O. I/O submitted on the housekeeping CPUs has no influence on those queues. mds = [X86,INTEL] The changes to options: off - On TSX Async Abort (TAA)-affected machines, mds=off can be prevented by an active TAA mitigation as both vulnerabilities are mitigated with the same mechanism. So in order to disable this mitigation, you need to specify the tsx_async_abort=off kernel parameter too. Not specifying this parameter is equivalent to mds=full . For details see the upstream kernel documentation . mem_encrypt = [X86-64] AMD Secure Memory Encryption (SME) control ... 
For details on when the memory encryption can be activated, see the upstream kernel documentation . mitigations = The changes to options: off - Disable all optional CPU mitigations. This improves system performance, but it may also expose users to several CPU vulnerabilities. Equivalent to: nopti [X86,PPC] kpti=0 [ARM64] nospectre_v1 [X86,PPC] nobp=0 [S390] nospectre_v2 [X86,PPC,S390,ARM64] spectre_v2_user=off [X86] spec_store_bypass_disable=off [X86,PPC] ssbd=force-off [ARM64] l1tf=off [X86] mds=off [X86] tsx_async_abort=off [X86] kvm.nx_huge_pages=off [X86] Exceptions: This does not have any effect on kvm.nx_huge_pages when kvm.nx_huge_pages=force . auto,nosmt - Mitigate all CPU vulnerabilities, disabling Simultaneous Multi Threading (SMT) if needed. This option is for users who always want to be fully mitigated, even if it means losing SMT. Equivalent to: l1tf=flush,nosmt [X86] mds=full,nosmt [X86] tsx_async_abort=full,nosmt [X86] rcutree.jiffies_till_sched_qs = [KNL] This parameter sets the required age in jiffies for a given grace period before Read-copy update (RCU) starts soliciting quiescent-state help from the rcu_note_context_switch() and cond_resched() functions. If not specified, the kernel will calculate a value based on the most recent settings of the rcutree.jiffies_till_first_fqs and rcutree.jiffies_till_next_fqs kernel parameters. This calculated value may be viewed in the rcutree.jiffies_to_sched_qs kernel parameter. Any attempt to set rcutree.jiffies_to_sched_qs will be overwritten. tsc = This parameter disables clocksource stability checks for Time Stamp Counter (TSC). Format: <string> The options are: reliable [x86] - Marks the TSC clocksource as reliable. This option disables the clocksource verification at runtime, as well as the stability checks done at bootup. The option also enables the high-resolution timer mode on older hardware, and in virtualized environment. noirqtime [x86] - Do not use TSC to do Interrupt Request (IRQ) accounting. Used to run time disable IRQ_TIME_ACCOUNTING on any platforms where Read Time-Stamp Counter (RDTSC) is slow and this accounting can add overhead. unstable [x86] - Marks the TSC clocksource as unstable. This option marks the TSC unconditionally unstable at bootup and avoids any further wobbles once the TSC watchdog notices. nowatchdog [x86] - Disables the clocksource watchdog. The option is used in situations with strict latency requirements where interruptions from the clocksource watchdog are not acceptable. 5.2.3. New /proc/sys/kernel parameters panic_print Bitmask for printing the system info when panic occurs. The user can chose the combination of the following bits: bit 0: print all tasks info bit 1: print system memory info bit 2: print timer info bit 3: print locks info if the CONFIG_LOCKDEP kernel configuration item is on bit 4: print ftrace buffer For example, to print tasks and memory info on panic, execute: 5.2.4. Updated /proc/sys/kernel parameters threads-max This parameter controls the maximum number of threads the fork() function can create. During initialization, the kernel sets this value in such a way that even if the maximum number of threads is created, the thread structures occupy only a part (1/8th) of the available RAM pages. The minimum value that can be written to threads-max is 1. The maximum value is given by the constant FUTEX_TID_MASK (0x3fffffff) . If a value outside of this range is written to threads-max , an error EINVAL occurs. 5.2.5. 
Updated /proc/sys/net parameters bpf_jit_enable This parameter enables the Berkeley Packet Filter Just-in-Time (BPF JIT) compiler. BPF is a flexible and efficient infrastructure allowing to execute bytecode at various hook points. It is used in a number of Linux kernel subsystems such as networking (for example XDP , tc ), tracing (for example kprobes , uprobes , tracepoints ) and security (for example seccomp ). LLVM has a BPF back-end that can compile restricted C into a sequence of BPF instructions. After program load through the bpf() system call and passing a verifier in the kernel, JIT will then translate these BPF proglets into native CPU instructions. There are two flavors of JIT , the newer eBPF JIT is currently supported on the following CPU architectures: x86_64 arm64 ppc64 (both little and big endians) s390x 5.3. Device Drivers This chapter provides a comprehensive listing of all device drivers that are new or have been updated in Red Hat Enterprise Linux 8.2. 5.3.1. New drivers Network drivers gVNIC Driver (gve.ko.xz) Broadcom UniMAC MDIO bus controller (mdio-bcm-unimac.ko.xz) Software iWARP Driver (siw.ko.xz) Graphics drivers and miscellaneous drivers DRM VRAM memory-management helpers (drm_vram_helper.ko.xz) cpuidle driver for haltpoll governor (cpuidle-haltpoll.ko.xz) stm_ftrace driver (stm_ftrace.ko.xz) stm_console driver (stm_console.ko.xz) System Trace Module device class (stm_core.ko.xz) dummy_stm device (dummy_stm.ko.xz) stm_heartbeat driver (stm_heartbeat.ko.xz) Intel(R) Trace Hub Global Trace Hub driver (intel_th_gth.ko.xz) Intel(R) Trace Hub PTI/LPP output driver (intel_th_pti.ko.xz) Intel(R) Trace Hub controller driver (intel_th.ko.xz) Intel(R) Trace Hub Memory Storage Unit driver (intel_th_msu.ko.xz) Intel(R) Trace Hub Software Trace Hub driver (intel_th_sth.ko.xz) Intel(R) Trace Hub Memory Storage Unit software sink (intel_th_msu_sink.ko.xz) Intel(R) Trace Hub PCI controller driver (intel_th_pci.ko.xz) Intel(R) Trace Hub ACPI controller driver (intel_th_acpi.ko.xz) MC Driver for Intel 10nm server processors (i10nm_edac.ko.xz) Device DAX: direct access mapping device (dax_pmem_core.ko.xz) PMEM DAX: direct access to persistent memory (dax_pmem.ko.xz) PMEM DAX: support the deprecated /sys/class/dax interface (dax_pmem_compat.ko.xz) Intel PMC Core platform init (intel_pmc_core_pltdrv.ko.xz) Intel RAPL (Running Average Power Limit) control via MSR interface (intel_rapl_msr.ko.xz) Intel Runtime Average Power Limit (RAPL) common code (intel_rapl_common.ko.xz) Storage drivers Clustering support for MD (md-cluster.ko.xz) 5.3.2. Updated drivers Network driver updates VMware vmxnet3 virtual NIC driver (vmxnet3.ko.xz) has been updated to version 1.4.17.0-k. Intel(R) 10 Gigabit Virtual Function Network Driver (ixgbevf.ko.xz) has been updated to version 4.1.0-k-rh8.2.0. Intel(R) 10 Gigabit PCI Express Network Driver (ixgbe.ko.xz) has been updated to version 5.1.0-k-rh8.2.0. Intel(R) Ethernet Connection E800 Series Linux Driver (ice.ko.xz) has been updated to version 0.8.1-k. The Netronome Flow Processor (NFP) driver (nfp.ko.xz) has been updated to version 4.18.0-185.el8.x86_64. Elastic Network Adapter (ENA) (ena.ko.xz) has been updated to version 2.1.0K. Graphics and miscellaneous driver updates HPE watchdog driver (hpwdt.ko.xz) has been updated to version 2.0.3. Intel I/OAT DMA Linux driver (ioatdma.ko.xz) has been updated to version 5.00. Storage driver updates Driver for HPE Smart Array Controller (hpsa.ko.xz) has been updated to version 3.4.20-170-RH4. 
LSI MPT Fusion SAS 3.0 Device Driver (mpt3sas.ko.xz) has been updated to version 32.100.00.00. QLogic FCoE Driver (bnx2fc.ko.xz) has been updated to version 2.12.10. Emulex LightPulse Fibre Channel SCSI driver (lpfc.ko.xz) has been updated to version 0:12.6.0.2. QLogic FastLinQ 4xxxx FCoE Module (qedf.ko.xz) has been updated to version 8.42.3.0. QLogic Fibre Channel HBA Driver (qla2xxx.ko.xz) has been updated to version 10.01.00.21.08.2-k. Driver for Microsemi Smart Family Controller version (smartpqi.ko.xz) has been updated to version 1.2.10-025. QLogic FastLinQ 4xxxx iSCSI Module (qedi.ko.xz) has been updated to version 8.37.0.20. Broadcom MegaRAID SAS Driver (megaraid_sas.ko.xz) has been updated to version 07.710.50.00-rc1. 5.4. Bug fixes This part describes bugs fixed in Red Hat Enterprise Linux 8.2 that have a significant impact on users. 5.4.1. Installer and image creation Using the version or inst.version kernel boot parameters no longer stops the installation program Previously, booting the installation program from the kernel command line using the version or inst.version boot parameters printed the version, for example anaconda 30.25.6 , and stopped the installation program. With this update, the version and inst.version parameters are ignored when the installation program is booted from the kernel command line, and as a result, the installation program is not stopped. (BZ#1637472) Support secure boot for s390x in the installer Previously, RHEL 8.1 provided support for preparing boot disks for use in IBM Z environments that enforced the use of secure boot. The capabilities of the server and hypervisor used during installation determined if the resulting on-disk format contained secure boot support. There was no way to influence the on-disk format during installation. Consequently, if you installed RHEL 8.1 in an environment that supported secure boot, the system was unable to boot when moved to an environment that lacked secure boot support, as is done in some failover scenarios. With this update, you can now configure the secure boot option of the zipl tool. To do so, you can use either: The Kickstart zipl command and one of its options, for example: --secure-boot , --no-secure-boot , and --force-secure-boot . From the Installation Summary window in the GUI, you can select the System > Installation Destination > Full disk summary and boot loader link and set the boot device. As a result, the installation can now be booted in environments that lack secure boot support. (BZ#1659400) The secure boot feature is now available Previously, the default value for the secure= boot option was not set to auto , and as a result, the secure boot feature was not available. With this update, unless previously configured, the default value is set to auto , and the secure boot feature is now available. (BZ#1750326) The /etc/sysconfig/kernel file no longer references the new-kernel-pkg script Previously, the /etc/sysconfig/kernel file referenced the new-kernel-pkg script. However, the new-kernel-pkg script is not included in a RHEL 8 system. With this update, the reference to the new-kernel-pkg script has been removed from the /etc/sysconfig/kernel file. ( BZ#1747382 ) The installation does not set more than the maximum number of allowed devices in the boot-device NVRAM variable Previously, the RHEL 8 installation program set more than the maximum number of allowed devices in the boot-device NVRAM variable. 
As a result, the installation failed on systems that had more than the maximum number of devices. With this update, the RHEL 8 installation program now checks the maximum device setting and only adds the permitted number of devices. (BZ#1748756) Installations work for an image location that uses a URL command in a Kickstart file located in a non-network location Previously, the installation failed early in the process when network activation triggered by the image remote location was specified by a URL command in a Kickstart file located in a non-network location. This update fixes the issue, and installations that provide the image location by using a URL command in a Kickstart file that is located in a non-network location, for example, a CD-ROM or local block device, now work as expected. (BZ#1649359) The RHEL 8 installation program only checks ECKD DASD for unformatted devices Previously, when checking for unformatted devices, the installation program checked all DASD devices. However, the installation program should only have checked ECKD DASD devices. As a consequence, the installation failed with a traceback when an FBA DASD device with SWAPGEN was used. With this update, the installation program does not check FBA DASD devices, and the installation completes successfully. (BZ#1715303) 5.4.2. Software management yum repolist no longer ends on first unavailable repository Previously, the repository configuration option skip_if_unavailable was by default set as follows: This setting forced the yum repolist command to end on first unavailable repository with an error and exit status 1. Consequently, yum repolist did not continue listing available repositories. With this update, yum repolist has been fixed to no longer require any downloads. As a result, yum repolist does not provide any output requiring metadata, and the command now continues listing available repositories as expected. Note that the number of available packages is only returned by yum repolist --verbose or yum repoinfo that still require available metadata. Therefore these commands will end on the first unavailable repository. (BZ#1697472) 5.4.3. Shells and command-line tools ReaR updates RHEL 8.2 introduces a number of updates to the Relax-and-Recover ( ReaR ) utility. The build directory handling has been changed. Previously, the build directory was kept in a temporary location in case ReaR encountered a failure. With this update, the build directory is deleted by default in non-interactive runs to prevent consuming disk space. The semantics of the KEEP_BUILD_DIR configuration variable has been enhanced to include a new errors value. You can set the KEEP_BUILD_DIR variable to the following values: errors to preserve the build directory on errors for debugging (the behavior) y ( true ) to always preserve the build directory n ( false ) to never preserve the build directory The default value is an empty string with the meaning of errors when ReaR is being executed interactively (in a terminal) and false if ReaR is being executed non-interactively. Note that KEEP_BUILD_DIR is automatically set to true in debug mode ( -d ) and in debugscript mode ( -D ); this behavior has not been changed. Notable bug fixes include: Support for NetBackup 8.0 has been fixed. ReaR no longer aborts with a bash error similar to xrealloc: cannot allocate on systems with a large number of users, groups, and users per group. 
The bconsole command now shows its prompt, which enables you to perform a restore operation when using the Bacula integration. ReaR now correctly backs up files also in situations when the docker service is running but no docker root directory has been defined, or when it is impossible to determine the status of the docker service. Recovery no longer fails when using thin pools or recovering a system in Migration Mode. Extremely slow rebuild of initramfs during the recovery process with LVM has been fixed. ReaR now creates a working bootable ISO image on the AMD and Intel 64-bit architectures when using the UEFI bootloader. Booting a rescue image in this setup no longer aborts in Grub with the error message Unknown command 'configfile' (... ) Entering rescue mode... . Support for GRUB_RESCUE in this setup, which previously could fail due to missing XFS filesystem support, has also been fixed. ( BZ#1729501 ) mlocate-updatedb.timer is now enabled during the mlocate package installation Previously, reindexing of the file database was not performed automatically, because the mlocate-updatedb.timer timer was disabled after the mlocate package installation. With this update, the mlocate-updatedb.timer timer is now a part of the 90-default.preset file and is enabled by default after the mlocate package installation. As a result, the file database is updated automatically. ( BZ#1817591 ) 5.4.4. Infrastructure services dnsmasq now correctly handles the non-recursive DNS queries Previously, dnsmasq forwarded all the non-recursive queries to an upstream server, which led to different responses. With this update, the non-recursive queries to local known names, such as DHCP host lease names or hosts read from the /etc/hosts file, are handled by dnsmasq and are not forwarded to an upstream server. As a result, the same response as to recursive queries to known names is returned. ( BZ#1700916 ) dhclient no longer fails to renew the IP address after system time changes Previously, if the system time changed, the system could lose the IP address assigned due to the removal by the kernel. With this update, dhclient uses monotonic timer to detect backward time jumps and issues the DHCPREQUEST message for lease extension in case of discontinuous jump in the system time. As a result, the system no longer loses the IP address in the described scenario. ( BZ#1729211 ) ipcalc now returns the correct broadcast address for the /31 networks This update fixes the ipcalc utility to follow the RFC 3021 standard properly. As a result, ipcalc returns the correct broadcast address when the /31 prefix is used on an interface. (BZ#1638834) /etc/services now contains proper NRPE port definition This update adds the proper Nagios Remote Plug-in Executor (NRPE) service port definition to the /etc/services file. ( BZ#1730396 ) The postfix DNS resolver code now uses res_search instead of res_query Following its update in postfix , the DNS resolver code used the res_query function instead of the res_search function. As a consequence, the DNS resolver did not search host names in the current and parent domains with the following postfix configuration: For example, for: and the domain name in the example.com format, the DNS resolver did not use the smtp. example.com SMTP server for relaying. With this update, the DNS resolver code has been changed to use res_search instead of res_query , and it now searches the host names in the current and parent domains correctly. 
( BZ#1723950 ) PCRE, CDB, and SQLite can now be used with Postfix In RHEL 8, the postfix package has been split into multiple subpackages, each subpackage providing a plug-in for a specific database. Previously, RPM packages containing the postfix-pcre , postfix-cdb , and postfix-sqlite plug-ins were not distributed. Consequently, databases with these plug-ins could not be used with Postfix. This update adds RPM packages containing the PCRE, CDB, and SQLite plug-ins to the AppStream repository. As a result, these plug-ins can be used after the appropriate RPM package is installed. ( BZ#1745321 ) 5.4.5. Security fapolicyd no longer prevents RHEL updates When an update replaces the binary of a running application, the kernel modifies the application binary path in memory by appending the " (deleted)" suffix. Previously, the fapolicyd file access policy daemon treated such applications as untrusted, and prevented them from opening and executing any other files. As a consequence, the system was sometimes unable to boot after applying updates. With the release of the RHBA-2020:5243 advisory, fapolicyd ignores the suffix in the binary path so the binary can match the trust database. As a result, fapolicyd enforces the rules correctly and the update process can finish. (BZ#1897091) openssl-pkcs11 no longer locks devices by attempting to log in to multiple devices Previously, the openssl-pkcs11 engine attempted to log in to the first result of a search using the provided PKCS #11 URI and used the provided PIN even if the first result was not the intended device and the PIN matched another device. These failed authentication attempts locked the device. openssl-pkcs11 now attempts to log in to a device only if the provided PKCS #11 URI matches only a single device. The engine now intentionally fails in case the PKCS #11 search finds more than one device. For this reason, you must provide a PKCS #11 URI that matches only a single device when using openssl-pkcs11 to log in to the device. ( BZ#1705505 ) OpenSCAP offline scans using rpmverifyfile now work properly Prior to this update, the OpenSCAP scanner did not correctly change the current working directory in offline mode, and the fchdir function was not called with the correct arguments in the OpenSCAP rpmverifyfile probe. The OpenSCAP scanner has been fixed to correctly change the current working directory in offline mode, and the fchdir function has been fixed to use correct arguments in rpmverifyfile . As a result, SCAP content that contains OVAL rpmverifyfile can be used by OpenSCAP to scan arbitrary file systems. (BZ#1636431) httpd now starts correctly if using an ECDSA private key without matching public key stored in a PKCS #11 device Unlike RSA keys, ECDSA private keys do not necessarily contain public-key information. In this case, you cannot obtain the public key from an ECDSA private key. For this reason, a PKCS #11 device stores public-key information in a separate object whether it is a public-key object or a certificate object. OpenSSL expected the EVP_PKEY structure provided by an engine for a private key to contain the public-key information. When filling the EVP_PKEY structure to be provided to OpenSSL, the engine in the openssl-pkcs11 package tried to fetch the public-key information only from matching public-key objects and ignored the present certificate objects. 
When OpenSSL requested an ECDSA private key from the engine, the provided EVP_PKEY structure did not contain the public-key information if the public key was not present in the PKCS #11 device, even when a matching certificate that contained the public key was available. As a consequence, since the Apache httpd web server called the X509_check_private_key() function, which requires the public key, in its start-up process, httpd failed to start in this scenario. This problem has been solved by loading the EC public key from the certificate if the public-key object is not available. As a result, httpd now starts correctly when ECDSA keys are stored in a PKCS #11 device. ( BZ#1664807 ) scap-security-guide PCI-DSS remediations of Audit rules now work properly Previously, the scap-security-guide package contained a combination of remediation and a check that could result in one of the following scenarios: incorrect remediation of Audit rules scan evaluation containing false positives where passed rules were marked as failed Consequently, during the RHEL installation process, scanning of the installed system reported some Audit rules as either failed or errored. With this update, the remediations have been fixed, and scanning of the system installed with the PCI-DSS security policy no longer reports false positives for Audit rules. ( BZ#1754919 ) OpenSCAP now provides offline scanning of virtual machines and containers Previously, refactoring of the OpenSCAP codebase caused certain RPM probes to fail to scan VM and containers file systems in offline mode. Consequently, the following tools could not be included in the openscap-utils package: oscap-vm and oscap-chroot . Furthermore, the openscap-containers package was completely removed from RHEL 8. With this update, the problems in the probes have been fixed. As a result, RHEL 8 now contains the oscap-podman , oscap-vm , and oscap-chroot tools in the openscap-utils package. (BZ#1618489) OpenSCAP rpmverifypackage now works correctly Previously, the chdir and chroot system calls were called twice by the rpmverifypackage probe. Consequently, an error occurred when the probe was utilized during an OpenSCAP scan with custom Open Vulnerability and Assessment Language (OVAL) content. The rpmverifypackage probe has been fixed to properly utilize the chdir and chroot system calls. As a result, rpmverifypackage now works correctly. (BZ#1646197) 5.4.6. Networking Locking in the qdisc_run function now does not cause kernel crash Previously, a race condition when the pfifo_fast queue discipline resets while dequeuing traffic was leading to packet transmission after they were freed. As a consequence, sometimes kernel was getting terminated unexpectedly. With this update, locking in the qdisc_run function has been improved. As a result, kernel no longer crashes in the described scenario. (BZ#1744397) The DBus APIs in org.fedoraproject.FirewallD1.config.service work as expected Previously, the DBus API getIncludes , setIncludes , and queryIncludes functions in org.fedoraproject.FirewallD1 returned an error message: org.fedoraproject.FirewallD1.Exception: list index out of range due to bad indexing. With this update, the DBus API getIncludes , setIncludes , and queryIncludes functions work as expected. ( BZ#1737045 ) RHEL no longer logs a kernel warning when unloading the ipvs module Previously, the IP virtual server ( ipvs ) module used an incorrect reference counting, which caused a race condition when unloading the module. 
Consequently, RHEL logged a kernel warning. This update fixes the race condition. As a result, the kernel no longer logs the warning when you unload the ipvs module. (BZ#1687094) The nft utility no longer interprets arguments as command-line options after the first non-option argument Previously, the nft utility accepted options anywhere in an nft command. For example, admins could use options between or after non-option arguments. As a consequence, due to the leading dash, nft interpreted negative priority values as options, and the command failed. The nft utility's command-line parser has been updated to not interpret arguments that are starting with a dash after the first non-option argument has been read. As a result, admins no longer require workarounds to pass negative priority values to nft . Note that due to this change, you must now pass all command-options to nft before the first non-option argument. Before you update, verify your nftables scripts to match this new criteria to ensure that the script works as expected after you installed this update. ( BZ#1778883 ) The /etc/hosts.allow and /etc/hosts.deny files no longer contain outdated references to removed tcp_wrappers Previously, the /etc/hosts.allow and /etc/hosts.deny files contained outdated information about the tcp_wrappers package. The files are removed in RHEL 8 as they are no longer needed for tcp_wrappers which is removed. ( BZ#1663556 ) A configuration parameter has been added to firewalld to disable zone drifting Previously, the firewalld service contained an undocumented behavior known as "zone drifting". RHEL 8.0 removed this behavior because it could have a negative security impact. As a consequence, on hosts that used this behavior to configure a catch-all or fallback zone, firewalld denied connections that were previously allowed. This update re-adds the zone drifting behavior, but as a configurable feature. As a result, users can now decide to use zone drifting or disable the behavior for a more secure firewall setup. By default, in RHEL 8.2, the new AllowZoneDrifting parameter in the /etc/firewalld/firewalld.conf file is set to yes . Note that, if the parameter is enabled, firewalld logs: (BZ#1772208) 5.4.7. Kernel Subsection memory hotplug is now fully supported Previously, some platforms aligned physical memory regions such as Dual In-Line Modules (DIMMs) and interleave sets to 64MiB memory boundary. However, as the Linux hotplug subsystem uses a memory size of 128MiB, hot-plugging new devices caused multiple memory regions to overlap in a single hotplug memory window. Consequently, this caused failure in listing the available persistent memory namespaces with the following or a similar call trace: This update fixes the problem and supports Linux hotplug subsystem to enable multiple memory regions to share a single hotplug memory window. (BZ#1724969) Data corruption now triggers a BUG instead of a WARN message With this enhancement, the list corruptions at lib/list_debug.c now triggers a BUG, which generates a report with a vmcore . Previously, when encountering a data corruption, a simple WARN was generated, which was likely to go unnoticed. With set CONFIG_BUG_ON_DATA_CORRUPTION , the kernel now creates a crash and triggers a BUG in response to data corruption. This prevents further damage and reduces the security risk. The kdump now generates a vmcore , which improves the data corruption bug reporting. 
(BZ#1714330) Support for Intel Carlsville card is available but not verified in RHEL 8.2 The Intel Carlsville card support is available but not tested on Red Hat Enterprise Linux 8.2. (BZ#1720227) RPS and XPS no longer place jobs on isolated CPUs Previously, the Receive Packet Steering (RPS) software-queue mechanism and the Transmit Packet Steering (XPS) transmit queue selection mechanism allocated jobs on all CPU sets, including isolated CPUs. Consequently, this could cause an unexpected latency spike in a real-time environment when a latency-sensitive workload was using the same CPU where RPS or XPS jobs were running. With this update, the store_rps_map() function does not include any isolated CPUs for the purpose of RPS configuration. Similarly, the kernel drivers used for XPS configuration are respecting CPU isolation. As a result, RPS and XPS no longer place jobs on isolated CPUs in the described scenario. If you configure an isolated CPU in the /sys/devices/pci*/net/dev/queues/rx-*/rps_cpus file, the following error appears: However, manually configuring an isolated CPU in the /sys/devices/pci*/net/dev/queues/tx-*/xps_cpus file successfully allocates XPS jobs on the isolated CPU. Note that a networking workload in an environment with isolated CPUs is likely to experience some performance variation. (BZ#1867174) 5.4.8. File systems and storage SCSI drivers no longer use an excessive amount of memory Previously, certain SCSI drivers used a larger amount of memory than in RHEL 7. In certain cases, such as vPort creation on a Fibre Channel host bus adapter (HBA), the memory usage was excessive, depending upon the system configuration. The increased memory usage was caused by memory preallocation in the block layer. Both the multiqueue block device scheduling (BLK-MQ) and the multiqueue SCSI stack (SCSI-MQ) preallocated memory for each I/O request, leading to the increased memory usage. With this update, the block layer limits the amount of memory preallocation, and as a result, the SCSI drivers no longer use an excessive amount of memory. (BZ#1698297) VDO can now suspend before UDS has finished rebuilding Previously, the dmsetup suspend command became unresponsive if you attempted to suspend a VDO volume while the UDS index was rebuilding. The command finished only after the rebuild. With this update, the problem has been fixed. The dmsetup suspend command can finish before the UDS rebuild is done without becoming unresponsive. ( BZ#1737639 ) 5.4.9. Dynamic programming languages, web and database servers Problems in mod_cgid logging have been fixed Prior to this update, if the mod_cgid Apache httpd module was used under a threaded multi-processing module (MPM), the following logging problems occurred: The stderr output of the CGI script was not prefixed with standard timestamp information. The stderr output of the CGI script was not correctly redirected to a log file specific to the VirtualHost , if configured. This update fixes the problems, and mod_cgid logging now works as expected. (BZ#1633224) 5.4.10. Compilers and development tools Unrelocated and uninitialized shared objects no longer result in failures if dlopen fails Previously, if the dlopen call failed, the glibc dynamic linker did not remove shared objects with the NODELETE mark before reporting the error. Consequently, the unrelocated and uninitialized shared objects remained in the process image, eventually resulting in assertion failures or crashes. 
With this update, the dynamic loader uses a pending NODELETE state to remove shared objects upon dlopen failure, before marking them as NODELETE permanently. As a result, the process does not leave any unrelocated objects behind. Also, lazy binding failures while ELF constructors and destructors run now terminate the process. ( BZ#1410154 ) Advanced SIMD functions on the 64-bit ARM architecture no longer miscompile when lazily resolved Previously, the new vector Procedure Call Standard (PCS) for Advanced SIMD did not properly save and restore certain callee-saved registers when lazily resolving Advanced SIMD functions. As a consequence, binaries could misbehave at runtime. With this update, the Advanced SIMD and SVE vector functions in the symbol table are marked with .variant_pcs and, as a result, the dynamic linker will bind such functions early. ( BZ#1726641 ) The sudo wrapper script now parses options Previously, the /opt/redhat/devtoolset*/root/usr/bin/sudo wrapper script did not correctly parse sudo options. As a consequence, some sudo options (for example, sudo -i ) could not be executed. With this update, more sudo options are correctly parsed and, as a result, the sudo wrapper script works more like /usr/bin/sudo . ( BZ#1774118 ) Alignment of TLS variables in glibc has been fixed Previously, aligned thread-local storage (TLS) data could, under certain conditions, become instantiated without the expected alignment. With this update, the POSIX Thread Library libpthread has been enhanced to ensure correct alignment under any conditions. As a result, aligned TLS data is now correctly instantiated for all threads with the correct alignment. ( BZ#1764214 ) Repeated pututxline calls following EINTR or EAGAIN error no longer corrupt the utmp file When the pututxline function tries to acquire a lock and does not succeed in time, the function returns with EINTR or EAGAIN error code. Previously in this situation, if pututxline was called immediately again and managed to obtain the lock, it did not use an already-allocated matching slot in the utmp file, but added another entry instead. As a consequence, these unused entries increased the size of the utmp file substantially. This update fixes the issue, and the entries are added to the utmp file correctly now. ( BZ#1749439 ) mtrace no longer hangs when internal failures occur Previously, a defect in the mtrace tool implementation could cause memory tracing to hang. To fix this issue, the mtrace memory tracing implementation has been made more robust to avoid the hang even in the face of internal failures. As a result, users can now call mtrace and it no longer hangs, completing in bounded time. ( BZ#1764235 ) The fork function avoids certain deadlocks related to use of pthread_atfork Previously, if a program registered an atfork handler and invoked fork from an asynchronous-signal handler, a defect in the internal implementation-dependent lock could cause the program to freeze. With this update, the implementation of fork and its atfork handlers is adjusted to avoid the deadlock in single-threaded programs. ( BZ#1746928 ) strstr no longer returns incorrect matches for a truncated pattern On certain IBM Z platforms (z15, previously known as arch13), the strstr function did not correctly update a CPU register when handling search patterns that cross a page boundary. As a consequence, strstr returned incorrect matches. This update fixes the problem, and as a result, strstr works as expected in the mentioned scenario. 
( BZ#1777241 ) C.UTF-8 locale source ellipsis expressions in glibc are fixed Previously, a defect in the C.UTF-8 source locale resulted in all Unicode code points above U+10000 lacking collation weights. As a consequence, all code points above U+10000 did not collate as expected. The C.UTF-8 source locale has been corrected, and the newly compiled binary locale now has collation weights for all Unicode code points. The compiled C.UTF-8 locale is 5.3MiB larger as a result of this fix. ( BZ#1361965 ) glibc no longer fails when getpwent() is called without calling setpwent() If your /etc/nsswitch.conf file pointed to the Berkeley DB ( db ) password provider, you could request data using the getpwent() function without first calling setpwent() only once. When you called the endpwent() function, further calls to getpwent() without first calling setpwent() caused glibc to fail because endpwent() could not reset the internals to allow a new query. This update fixes the problem. As a result, after you end one query with endpwent() , further calls to getpwent() will start a new query even if you do not call setpwent() . ( BZ#1747502 ) ltrace can now trace system calls in hardened binaries Previously, ltrace did not produce any results on certain hardened binaries, such as system binaries, on the AMD and Intel 64-bit architectures. With this update, ltrace can now trace system calls in hardened binaries. (BZ#1655368) Intel's JCC flaw no longer causes significant performance loss in the GCC compiler Certain Intel CPUs are affected by the Jump Conditional Code (JCC) bug causing machine instructions to be executed incorrectly. Consequently, the affected CPUs might not execute programs properly. The full fix involves updating the microcode of vulnerable CPUs, which can cause a performance degradation. This update enables a workaround in the assembler that helps to reduce the performance loss. The workaround is not enabled by default. To apply the workaround, recompile a program using GCC with the -Wa,-mbranches-within-32B-boundaries command-line option. A program recompiled with this command-line option will not be affected by the JCC flaw, but the microcode update is still necessary to fully protect a system. Note that applying the workaround will increase the size of the program and can still cause a slight performance decrease, although it should be less than it would have been without the recompilation. ( BZ#1777002 ) make no longer slows down when using parallel builds Previously, while running parallel builds, make sub-processes could become temporarily unresponsive when waiting for their turn to run. As a consequence, builds with high -j values slowed down or ran at lower effective -j values. With this update, the job control logic of make is now non-blocking. As a result, builds with high -j values run at full -j speed. ( BZ#1774790 ) The ltrace tool now reports function calls correctly Because of improvements to binary hardening applied to all RHEL components, the ltrace tool previously could not detect function calls in binary files coming from RHEL components. As a consequence, ltrace output was empty because it did not report any detected calls when used on such binary files. This update fixes the way ltrace handles function calls, which prevents the described problem from occurring. (BZ#1618748) 5.4.11. 
Identity Management

The dsctl utility no longer fails to manage instances with a hyphen in their name
Previously, the dsctl utility did not correctly parse hyphens in the Directory Server instance names. As a consequence, administrators could not use dsctl to manage instances with a hyphen in their name. This update fixes the problem, and dsctl now works as expected in the mentioned scenario. ( BZ#1715406 )

Directory Server instance names can now have up to 103 characters
When an LDAP client establishes a connection to Directory Server, the server stores information related to the client address in a local buffer. Previously, the size of this buffer was too small to store an LDAPI path name longer than 46 characters. For example, this is the case if the name of the Directory Server instance is too long. As a consequence, the server terminated unexpectedly due to a buffer overflow. This update increases the buffer size to the maximum size the Netscape Portable Runtime (NSPR) library supports for the path name. As a result, Directory Server no longer crashes in the mentioned scenario. Note that due to the limitation in the NSPR library, an instance name can have a maximum of 103 characters. ( BZ#1748016 )

The pkidestroy utility now picks the correct instance
Previously, the pkidestroy --force command executed on a half-removed instance picked the pki-tomcat instance by default, regardless of the instance name specified with the -i instance option. As a consequence, this removed the pki-tomcat instance instead of the intended instance, and the --remove-logs option did not remove the intended instance's logs. pkidestroy now applies the right instance name, removing only the intended instance's leftovers. ( BZ#1698084 )

The ldap_user_authorized_service description has been updated in the sssd-ldap man page
The Pluggable Authentication Modules (PAM) stack has been changed in RHEL 8. For example, the systemd user session now starts a PAM conversation using the systemd-user PAM service. This service now recursively includes the system-auth PAM service, which may include the pam_sss.so interface. This means that the SSSD access control is always called. You should be aware of this change when designing access control rules for RHEL 8 systems. For example, you can add the systemd-user service to the allowed services list. Note that for some access control mechanisms, such as IPA HBAC or AD GPOs, the systemd-user service has been added to the allowed services list by default and you do not need to take any action. The sssd-ldap man page has been updated to include this information. ( BZ#1669407 )

Information about required DNS records is now displayed when enabling support for AD trust in IdM
Previously, when enabling support for Active Directory (AD) trust in a Red Hat Enterprise Linux Identity Management (IdM) installation with external DNS management, no information about required DNS records was displayed. Entering the ipa dns-update-system-records --dry-run command manually was necessary to obtain a list of all DNS records required by IdM. With this update, the ipa-adtrust-install command correctly lists the DNS service records for manual addition to the DNS zone. ( BZ#1665051 )

Recursive DNS queries are now disabled by default in IdM servers with integrated DNS
Previously, recursive queries were enabled by default when using an Identity Management (IdM) server with integrated DNS. As a consequence, it was possible to use the server for a DNS Amplification Attack. With this update, recursive DNS queries are now disabled by default, and it is no longer possible to use the server for a DNS Amplification Attack. You can manually allow recursive queries for specific clients by modifying the integrated DNS configuration on the IdM server. For example, to allow anyone to query the server for authoritative data, but only allow trusted clients to access your cache and recursion, list the clients in a trusted access control list (ACL): create the ACL in the /etc/named/ipa-ext.conf file, and then reference it in the /etc/named/ipa-options-ext.conf file, as sketched below. (BZ#2151696)
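The following is a minimal configuration sketch for the ACL described above. The ACL name, the client addresses, and the allow-recursion and allow-query-cache options are assumptions based on standard BIND syntax; adapt them to your environment.

# Define a trusted ACL for the IdM-integrated BIND instance (name and addresses are placeholders).
cat >> /etc/named/ipa-ext.conf << 'EOF'
acl "trusted_clients" {
    localhost;
    192.0.2.0/24;
};
EOF

# Reference the ACL so that only trusted clients can use recursion and the cache.
cat >> /etc/named/ipa-options-ext.conf << 'EOF'
allow-recursion { trusted_clients; };
allow-query-cache { trusted_clients; };
EOF

# Restart the IdM DNS service to apply the change (the service name can differ between releases).
systemctl restart named-pkcs11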
5.4.12. Desktop

GNOME Shell on Wayland no longer performs slowly when using a software renderer
Previously, the Wayland back end of GNOME Shell did not use a cacheable framebuffer when using a software renderer. As a consequence, software-rendered GNOME Shell on Wayland was slow compared to software-rendered GNOME Shell on the X.org back end. With this update, an intermediate shadow framebuffer has been added in GNOME Shell on Wayland. As a result, software-rendered GNOME Shell on Wayland now performs as well as GNOME Shell on X.org. (BZ#1737553)

5.4.13. Virtualization

Starting a VM on a 10th generation Intel Core processor no longer fails
Previously, starting a virtual machine (VM) failed on a host model that used a 10th generation Intel Core processor, also known as Icelake-Server. With this update, libvirt no longer attempts to disable the pconfig CPU feature, which is not supported by QEMU. As a result, starting a VM on a host model running a 10th generation Intel processor no longer fails. ( BZ#1749672 )

Using cloud-init to provision virtual machines on Microsoft Azure now works correctly
Previously, it was not possible to use the cloud-init utility to provision a RHEL 8 virtual machine (VM) on the Microsoft Azure platform. This update fixes the cloud-init handling of the Azure endpoints, and provisioning RHEL 8 VMs on Azure now proceeds as expected. (BZ#1641190)

RHEL 8 virtual machines on RHEL 7 hosts can be reliably viewed in higher resolution than 1920x1200
Previously, when using a RHEL 8 virtual machine (VM) running on a RHEL 7 host system, certain methods of displaying the graphical output of the VM, such as running the application in kiosk mode, could not use a resolution greater than 1920x1200. As a consequence, displaying VMs using those methods only worked in resolutions up to 1920x1200, even if the host hardware supported higher resolutions. This update adjusts the DRM and QXL drivers to prevent the described problem from occurring. (BZ#1635295)

Customizing an ESXi VM using cloud-init and rebooting the VM now works correctly
Previously, if the cloud-init service was used to modify a virtual machine (VM) running on the VMware ESXi hypervisor to use a static IP and the VM was then cloned, the new cloned VM in some cases took a very long time to reboot. This update modifies cloud-init not to rewrite the VM's static IP to DHCP, which prevents the described problem from occurring. (BZ#1666961, BZ#1706482 )

5.4.14. Containers

Pulling images from the quay.io registry no longer leads to unintended images
Previously, having the quay.io container image registry listed in the default registries search list provided in /etc/containers/registries.conf could allow a user to pull a spoofed image when using a short name. To fix this issue, the quay.io container image registry has been removed from the default registries search list in /etc/containers/registries.conf .
As a result, pulling images from the quay.io registry now requires users to specify the full repository name, such as quay.io/myorg/myimage . The quay.io registry can be added back to the default registries search list in /etc/containers/registries.conf to reenable pulling container images using short names; however, this is not recommended as it could create a security risk. ( BZ#1784267 )

5.5. Technology Previews

This part provides a list of all Technology Previews available in Red Hat Enterprise Linux 8.2. For information on the Red Hat scope of support for Technology Preview features, see Technology Preview Features Support Scope .

5.5.1. Networking

nmstate available as a Technology Preview
Nmstate is a network API for hosts. The nmstate packages, available as a Technology Preview, provide a library and the nmstatectl command-line utility to manage host network settings in a declarative manner. The networking state is described by a pre-defined schema. Reporting of the current state and changes to the desired state both conform to the schema. For further details, see the /usr/share/doc/nmstate/README.md file and the examples in the /usr/share/doc/nmstate/examples directory. (BZ#1674456)

AF_XDP available as a Technology Preview
The Address Family eXpress Data Path ( AF_XDP ) socket is designed for high-performance packet processing. It accompanies XDP and enables efficient redirection of programmatically selected packets to user space applications for further processing. (BZ#1633143)

XDP available as a Technology Preview
The eXpress Data Path (XDP) feature, which is available as a Technology Preview, provides a means to attach extended Berkeley Packet Filter (eBPF) programs for high-performance packet processing at an early point in the kernel ingress data path, allowing efficient programmable packet analysis, filtering, and manipulation. (BZ#1503672)

KTLS available as a Technology Preview
In Red Hat Enterprise Linux 8, Kernel Transport Layer Security (KTLS) is provided as a Technology Preview. KTLS handles TLS records using the symmetric encryption or decryption algorithms in the kernel for the AES-GCM cipher. KTLS also provides the interface for offloading TLS record encryption to Network Interface Controllers (NICs) that support this functionality. (BZ#1570255)

The dracut utility now supports creating initrd images with NetworkManager support as a technology preview
By default, the dracut utility uses a shell script to manage networking in the initial RAM disk ( initrd ). In certain cases, this could cause problems when the system switches from the RAM disk to the operating system that uses NetworkManager to configure the network. For example, NetworkManager could send another DHCP request, even if the script in the RAM disk already requested an IP address. This request from the RAM disk could result in a timeout. To solve these kinds of problems, dracut in RHEL 8.2 can now use NetworkManager in the RAM disk. Enable the NetworkManager dracut module and recreate the RAM disk images; a hedged example follows this entry. Note that Red Hat does not support technology preview features. However, to provide feedback about this feature, please contact Red Hat support. (BZ#1626348)
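A sketch of enabling NetworkManager in the initrd. The module name and the configuration file path are assumptions based on standard dracut conventions; check the dracut.conf(5) man page for your release before using them.

# Enable the NetworkManager dracut module (the module name is an assumption).
echo 'add_dracutmodules+=" network-manager "' > /etc/dracut.conf.d/enable-nm.conf

# Rebuild the initrd images for all installed kernels.
dracut -f --regenerate-all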
The mlx5_core driver supports Mellanox ConnectX-6 Dx network adapter as a Technology Preview
This enhancement adds the PCI IDs of the Mellanox ConnectX-6 Dx network adapter to the mlx5_core driver. On hosts that use this adapter, RHEL loads the mlx5_core driver automatically. Note that Red Hat provides this feature as an unsupported Technology Preview. (BZ#1687434)

The systemd-resolved service is now available as a Technology Preview
The systemd-resolved service provides name resolution to local applications. The service implements a caching and validating DNS stub resolver, and a Link-Local Multicast Name Resolution (LLMNR) and Multicast DNS resolver and responder. Note that, even if the systemd package provides systemd-resolved , this service is an unsupported Technology Preview. (BZ#1906489)

5.5.2. Kernel

kexec fast reboot as a Technology Preview
The kexec fast reboot feature continues to be available as a Technology Preview. Rebooting is now significantly faster thanks to kexec fast reboot . To use this feature, load the kexec kernel manually, and then reboot the operating system. ( BZ#1769727 )

eBPF available as a Technology Preview
Extended Berkeley Packet Filter (eBPF) is an in-kernel virtual machine that allows code execution in the kernel space, in a restricted sandbox environment with access to a limited set of functions. The virtual machine includes a new system call bpf() , which supports creating various types of maps, and also allows loading programs written in a special assembly-like code. The code is then loaded to the kernel and translated to the native machine code with just-in-time compilation. Note that the bpf() syscall can be successfully used only by a user with the CAP_SYS_ADMIN capability, such as the root user. See the bpf (2) man page for more information. The loaded programs can be attached to a variety of points (sockets, tracepoints, packet reception) to receive and process data. There are numerous components shipped by Red Hat that utilize the eBPF virtual machine. Each component is in a different development phase, and thus not all components are currently fully supported. All components are available as a Technology Preview, unless a specific component is indicated as supported. The following notable eBPF components are currently available as a Technology Preview:
bpftrace , a high-level tracing language that utilizes the eBPF virtual machine.
The eXpress Data Path (XDP) feature, a networking technology that enables fast packet processing in the kernel using the eBPF virtual machine.
(BZ#1559616)

libbpf is available as a Technology Preview
The libbpf package is currently available as a Technology Preview. The libbpf package is crucial for bpf-related applications such as bpftrace and bpf/xdp development. It is a mirror of the bpf-next Linux tree bpf-next/tools/lib/bpf directory plus its supporting header files. The version of the package reflects the version of the Application Binary Interface (ABI). (BZ#1759154)

The igc driver available as a Technology Preview for RHEL 8
The igc Intel 2.5G Ethernet Linux wired LAN driver is now available on all architectures for RHEL 8 as a Technology Preview. The ethtool utility also supports igc wired LANs. (BZ#1495358)

Soft-RoCE available as a Technology Preview
Remote Direct Memory Access (RDMA) over Converged Ethernet (RoCE) is a network protocol that implements RDMA over Ethernet. Soft-RoCE is the software implementation of RoCE which supports two protocol versions, RoCE v1 and RoCE v2. The Soft-RoCE driver, rdma_rxe , is available as an unsupported Technology Preview in RHEL 8. (BZ#1605216)

5.5.3.
File systems and storage NVMe/TCP is available as a Technology Preview Accessing and sharing Nonvolatile Memory Express (NVMe) storage over TCP/IP networks (NVMe/TCP) and its corresponding nvme-tcp.ko and nvmet-tcp.ko kernel modules have been added as a Technology Preview. The use of NVMe/TCP as either a storage client or a target is manageable with tools provided by the nvme-cli and nvmetcli packages. The NVMe/TCP target Technology Preview is included only for testing purposes and is not currently planned for full support. (BZ#1696451) File system DAX is now available for ext4 and XFS as a Technology Preview In Red Hat Enterprise Linux 8.2, file system DAX is available as a Technology Preview. DAX provides a means for an application to directly map persistent memory into its address space. To use DAX, a system must have some form of persistent memory available, usually in the form of one or more Non-Volatile Dual In-line Memory Modules (NVDIMMs), and a file system that supports DAX must be created on the NVDIMM(s). Also, the file system must be mounted with the dax mount option. Then, an mmap of a file on the dax-mounted file system results in a direct mapping of storage into the application's address space. (BZ#1627455) OverlayFS OverlayFS is a type of union file system. It enables you to overlay one file system on top of another. Changes are recorded in the upper file system, while the lower file system remains unmodified. This allows multiple users to share a file-system image, such as a container or a DVD-ROM, where the base image is on read-only media. OverlayFS remains a Technology Preview under most circumstances. As such, the kernel logs warnings when this technology is activated. Full support is available for OverlayFS when used with supported container engines ( podman , cri-o , or buildah ) under the following restrictions: OverlayFS is supported for use only as a container engine graph driver or other specialized use cases, such as squashed kdump initramfs. Its use is supported primarily for container COW content, not for persistent storage. You must place any persistent storage on non-OverlayFS volumes. You can use only the default container engine configuration: one level of overlay, one lowerdir, and both lower and upper levels are on the same file system. Only XFS is currently supported for use as a lower layer file system. Additionally, the following rules and limitations apply to using OverlayFS: The OverlayFS kernel ABI and user-space behavior are not considered stable, and might change in future updates. OverlayFS provides a restricted set of the POSIX standards. Test your application thoroughly before deploying it with OverlayFS. The following cases are not POSIX-compliant: Lower files opened with O_RDONLY do not receive st_atime updates when the files are read. Lower files opened with O_RDONLY , then mapped with MAP_SHARED are inconsistent with subsequent modification. Fully compliant st_ino or d_ino values are not enabled by default on RHEL 8, but you can enable full POSIX compliance for them with a module option or mount option. To get consistent inode numbering, use the xino=on mount option. You can also use the redirect_dir=on and index=on options to improve POSIX compliance. These two options make the format of the upper layer incompatible with an overlay without these options. That is, you might get unexpected results or errors if you create an overlay with redirect_dir=on or index=on , unmount the overlay, then mount the overlay without these options. 
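A minimal mount sketch using the xino=on option discussed above; the directory paths are placeholders and the lower layer is assumed to be an XFS file system that meets the requirements listed in this entry.

# Mount an overlay with consistent inode numbering (paths are placeholders).
mount -t overlay overlay \
    -o lowerdir=/srv/lower,upperdir=/srv/upper,workdir=/srv/work,xino=on \
    /srv/merged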
To determine whether an existing XFS file system is eligible for use as an overlay, use the following command and see if the ftype=1 option is enabled: SELinux security labels are enabled by default in all supported container engines with OverlayFS. Several known issues are associated with OverlayFS in this release. For details, see Non-standard behavior in the Linux kernel documentation . For more information about OverlayFS, see the Linux kernel documentation . (BZ#1690207) Stratis is now available as a Technology Preview Stratis is a new local storage manager. It provides managed file systems on top of pools of storage with additional features to the user. Stratis enables you to more easily perform storage tasks such as: Manage snapshots and thin provisioning Automatically grow file system sizes as needed Maintain file systems To administer Stratis storage, use the stratis utility, which communicates with the stratisd background service. Stratis is provided as a Technology Preview. For more information, see the Stratis documentation: Setting up Stratis file systems . RHEL 8.2 updates Stratis to version 2.0.0. This version improves reliability and the Stratis DBus API. (JIRA:RHELPLAN-1212) IdM now supports setting up a Samba server on an IdM domain member as a Technology Preview With this update, you can now set up a Samba server on an Identity Management (IdM) domain member. The new ipa-client-samba utility provided by the same-named package adds a Samba-specific Kerberos service principal to IdM and prepares the IdM client. For example, the utility creates the /etc/samba/smb.conf with the ID mapping configuration for the sss ID mapping back end. As a result, administrators can now set up Samba on an IdM domain member. Due to IdM Trust Controllers not supporting the Global Catalog Service, AD-enrolled Windows hosts cannot find IdM users and groups in Windows. Additionally, IdM Trust Controllers do not support resolving IdM groups using the Distributed Computing Environment / Remote Procedure Calls (DCE/RPC) protocols. As a consequence, AD users can only access the Samba shares and printers from IdM clients. For details, see Setting up Samba on an IdM domain member . (JIRA:RHELPLAN-13195) 5.5.4. High availability and clusters Pacemaker podman bundles available as a Technology Preview Pacemaker container bundles now run on the podman container platform, with the container bundle feature being available as a Technology Preview. There is one exception to this feature being Technology Preview: Red Hat fully supports the use of Pacemaker bundles for Red Hat Openstack. (BZ#1619620) Heuristics in corosync-qdevice available as a Technology Preview Heuristics are a set of commands executed locally on startup, cluster membership change, successful connect to corosync-qnetd , and, optionally, on a periodic basis. When all commands finish successfully on time (their return error code is zero), heuristics have passed; otherwise, they have failed. The heuristics result is sent to corosync-qnetd where it is used in calculations to determine which partition should be quorate. ( BZ#1784200 ) New fence-agents-heuristics-ping fence agent As a Technology Preview, Pacemaker now supports the fence_heuristics_ping agent. This agent aims to open a class of experimental fence agents that do no actual fencing by themselves but instead exploit the behavior of fencing levels in a new way. 
If the heuristics agent is configured on the same fencing level as the fence agent that does the actual fencing but is configured before that agent in sequence, fencing issues an off action on the heuristics agent before it attempts to do so on the agent that does the fencing. If the heuristics agent gives a negative result for the off action, it is already clear that the fencing level is not going to succeed, causing Pacemaker fencing to skip the step of issuing the off action on the agent that does the fencing. A heuristics agent can exploit this behavior to prevent the agent that does the actual fencing from fencing a node under certain conditions. A user might want to use this agent, especially in a two-node cluster, when it would not make sense for a node to fence the peer if it can know beforehand that it would not be able to take over the services properly. For example, it might not make sense for a node to take over services if it has problems reaching the networking uplink, making the services unreachable to clients, a situation which a ping to a router might detect. (BZ#1775847)

5.5.5. Identity Management

Identity Management JSON-RPC API available as Technology Preview
An API is available for Identity Management (IdM). To view the API, IdM also provides an API browser as Technology Preview. In Red Hat Enterprise Linux 7.3, the IdM API was enhanced to enable multiple versions of API commands. Previously, enhancements could change the behavior of a command in an incompatible way. Users are now able to continue using existing tools and scripts even if the IdM API changes. This enables:
Administrators to use previous or later versions of IdM on the server than on the managing client.
Developers to use a specific version of an IdM call, even if the IdM version changes on the server.
In all cases, the communication with the server is possible, regardless of whether one side uses, for example, a newer version that introduces new options for a feature. For details on using the API, see Using the Identity Management API to Communicate with the IdM Server (TECHNOLOGY PREVIEW) . ( BZ#1664719 )

DNSSEC available as Technology Preview in IdM
Identity Management (IdM) servers with integrated DNS now support DNS Security Extensions (DNSSEC), a set of extensions to DNS that enhance security of the DNS protocol. DNS zones hosted on IdM servers can be automatically signed using DNSSEC. The cryptographic keys are automatically generated and rotated. Users who decide to secure their DNS zones with DNSSEC are advised to read and follow these documents:
DNSSEC Operational Practices, Version 2: http://tools.ietf.org/html/rfc6781#section-2
Secure Domain Name System (DNS) Deployment Guide: http://dx.doi.org/10.6028/NIST.SP.800-81-2
DNSSEC Key Rollover Timing Considerations: http://tools.ietf.org/html/rfc7583
Note that IdM servers with integrated DNS use DNSSEC to validate DNS answers obtained from other DNS servers. This might affect the availability of DNS zones that are not configured in accordance with recommended naming practices. ( BZ#1664718 )

Checking the overall health of your public key infrastructure is now available as a Technology Preview
With this update, the public key infrastructure (PKI) Healthcheck tool reports the health of the PKI subsystem to the Identity Management (IdM) Healthcheck tool, which was introduced in RHEL 8.1. Executing the IdM Healthcheck invokes the PKI Healthcheck, which collects and returns the health report of the PKI subsystem; a minimal invocation sketch follows.
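A minimal invocation sketch, assuming the ipa-healthcheck tool is installed on the IdM server; the output-file option is an assumption and may differ between versions.

# Run all registered IdM health checks, including the PKI checks (output is JSON by default).
ipa-healthcheck

# Optionally write the report to a file for later review (option name is an assumption).
ipa-healthcheck --output-file /tmp/healthcheck-report.json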
The pki-healthcheck tool is available on any deployed RHEL IdM server or replica. All the checks provided by pki-healthcheck are also integrated into the ipa-healthcheck tool. ipa-healthcheck can be installed separately from the idm:DL1 module stream. Note that pki-healthcheck can also work in a standalone Red Hat Certificate System (RHCS) infrastructure. (BZ#1303254) 5.5.6. Desktop GNOME for the 64-bit ARM architecture available as a Technology Preview The GNOME desktop environment is now available for the 64-bit ARM architecture as a Technology Preview. This enables administrators to configure and manage servers from a graphical user interface (GUI) remotely, using the VNC session. As a consequence, new administration applications are available on the 64-bit ARM architecture. For example: Disk Usage Analyzer ( baobab ), Firewall Configuration ( firewall-config ), Red Hat Subscription Manager ( subscription-manager ), or the Firefox web browser. Using Firefox , administrators can connect to the local Cockpit daemon remotely. (JIRA:RHELPLAN-27394, BZ#1667516, BZ#1667225, BZ#1724302) 5.5.7. Graphics infrastructures VNC remote console available as a Technology Preview for the 64-bit ARM architecture On the 64-bit ARM architecture, the Virtual Network Computing (VNC) remote console is available as a Technology Preview. Note that the rest of the graphics stack is currently unverified for the 64-bit ARM architecture. (BZ#1698565) 5.5.8. Red Hat Enterprise Linux system roles The postfix role of RHEL system roles available as a Technology Preview Red Hat Enterprise Linux system roles provides a configuration interface for Red Hat Enterprise Linux subsystems, which makes system configuration easier through the inclusion of Ansible Roles. This interface enables managing system configurations across multiple versions of Red Hat Enterprise Linux, as well as adopting new major releases. The rhel-system-roles packages are distributed through the AppStream repository. The postfix role is available as a Technology Preview. The following roles are fully supported: kdump network selinux storage timesync For more information, see the Knowledgebase article about RHEL system roles . ( BZ#1812552 ) rhel-system-roles-sap available as a Technology Preview The rhel-system-roles-sap package provides Red Hat Enterprise Linux (RHEL) system roles for SAP, which can be used to automate the configuration of a RHEL system to run SAP workloads. These roles greatly reduce the time to configure a system to run SAP workloads by automatically applying the optimal settings that are based on best practices outlined in relevant SAP Notes. Access is limited to RHEL for SAP Solutions offerings. Please contact Red Hat Customer Support if you need assistance with your subscription. The following new roles in the rhel-system-roles-sap package are available as a Technology Preview: sap-preconfigure sap-netweaver-preconfigure sap-hana-preconfigure For more information, see Red Hat Enterprise Linux system roles for SAP . Note: RHEL 8.2 for SAP Solutions is scheduled to be validated for use with SAP HANA on Intel 64 architecture and IBM POWER9. Support for other SAP applications and database products, for example, SAP NetWeaver and SAP ASE, are tied to GA releases, and customers can use RHEL 8.2 features upon GA. Please consult SAP Notes 2369910 and 2235581 for the latest information about validated releases and SAP support. (BZ#1660832) 5.5.9. 
Virtualization

Select Intel network adapters now support SR-IOV in RHEL guests on Hyper-V
As a Technology Preview, Red Hat Enterprise Linux guest operating systems running on a Hyper-V hypervisor can now use the single-root I/O virtualization (SR-IOV) feature for Intel network adapters supported by the ixgbevf and i40evf drivers. This feature is enabled when the following conditions are met:
SR-IOV support is enabled for the network interface controller (NIC)
SR-IOV support is enabled for the virtual NIC
SR-IOV support is enabled for the virtual switch
The virtual function (VF) from the NIC is attached to the virtual machine.
The feature is currently supported with Microsoft Windows Server 2019 and 2016. (BZ#1348508)

KVM virtualization is usable in RHEL 8 Hyper-V virtual machines
As a Technology Preview, nested KVM virtualization can now be used on the Microsoft Hyper-V hypervisor. As a result, you can create virtual machines on a RHEL 8 guest system running on a Hyper-V host. Note that currently, this feature only works on Intel systems. In addition, nested virtualization is in some cases not enabled by default on Hyper-V. To enable it, see the following Microsoft documentation: https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/nested-virtualization (BZ#1519039)

AMD SEV for KVM virtual machines
As a Technology Preview, RHEL 8 introduces the Secure Encrypted Virtualization (SEV) feature for AMD EPYC host machines that use the KVM hypervisor. If enabled on a virtual machine (VM), SEV encrypts VM memory so that the host cannot access data on the VM. This increases the security of the VM if the host is successfully infected by malware. Note that the number of VMs that can use this feature at a time on a single host is determined by the host hardware. Current AMD EPYC processors support up to 509 running VMs using SEV. Also note that for VMs with SEV configured to be able to boot, you must also configure the VM with a hard memory limit. To do so, add the limit to the VM's XML configuration; a hedged sketch follows this entry. The recommended value for the limit, N, is equal to or greater than the guest RAM + 256 MiB. For example, if the guest is assigned 2 GiB RAM, N should be 2359296 or greater. (BZ#1501618, BZ#1501607, JIRA:RHELPLAN-7677)
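A minimal sketch of the hard memory limit described above. The memtune/hard_limit element is standard libvirt domain XML, but treat its exact placement as an assumption and verify it against the libvirt documentation for your release; the 2359296 KiB value matches the 2 GiB example in the note.

# Open the domain XML for editing (replace <vm-name> with the name of your VM).
virsh edit <vm-name>

# Then add a hard memory limit inside the <domain> element, for example:
#   <memtune>
#     <hard_limit unit='KiB'>2359296</hard_limit>
#   </memtune>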
Intel vGPU
As a Technology Preview, it is now possible to divide a physical Intel GPU device into multiple virtual devices referred to as mediated devices . These mediated devices can then be assigned to multiple virtual machines (VMs) as virtual GPUs. As a result, these VMs share the performance of a single physical Intel GPU. Note that only selected Intel GPUs are compatible with the vGPU feature. In addition, assigning a physical GPU to VMs makes it impossible for the host to use the GPU, and may prevent graphical display output on the host from working. (BZ#1528684)

5.5.10. Containers

skopeo container image is available as a Technology Preview
The registry.redhat.io/rhel8/skopeo container image is a containerized implementation of the skopeo package. skopeo is a command-line utility that performs various operations on container images and image repositories. This container image allows you to inspect and copy container images from one unauthenticated container registry to another. ( BZ#1627900 )

buildah container image is available as a Technology Preview
The registry.redhat.io/rhel8/buildah container image is a containerized implementation of the buildah package. buildah is a tool that facilitates building OCI container images. This container image allows you to build container images without the need to install the buildah package on your system. The use case does not cover running this image in rootless mode as a non-root user. ( BZ#1627898 )

The podman-machine command is unsupported
The podman-machine command for managing virtual machines is available only as a Technology Preview. Instead, run Podman directly from the command line. (JIRA:RHELDOCS-16861)

5.6. Deprecated functionality

This part provides an overview of functionality that has been deprecated in Red Hat Enterprise Linux 8.2. Deprecated devices are fully supported, which means that they are tested and maintained, and their support status remains unchanged within Red Hat Enterprise Linux 8. However, these devices will likely not be supported in the next major version release, and are not recommended for new deployments on the current or future major versions of RHEL. For the most recent list of deprecated functionality within a particular major release, see the latest version of release documentation. For information about the length of support, see Red Hat Enterprise Linux Life Cycle and Red Hat Enterprise Linux Application Streams Life Cycle . A package can be deprecated and not recommended for further use. Under certain circumstances, a package can be removed from the product. Product documentation then identifies more recent packages that offer functionality similar, identical, or more advanced than the one deprecated, and provides further recommendations. For information regarding functionality that is present in RHEL 7 but has been removed in RHEL 8, see Considerations in adopting RHEL 8 . For information regarding functionality that is present in RHEL 8 but has been removed in RHEL 9, see Considerations in adopting RHEL 9 .

5.6.1. Installer and image creation

Several Kickstart commands and options have been deprecated
Using the following commands and options in RHEL 8 Kickstart files will print a warning in the logs.
auth or authconfig
device
deviceprobe
dmraid
install
lilo
lilocheck
mouse
multipath
bootloader --upgrade
ignoredisk --interactive
partition --active
reboot --kexec
Where only specific options are listed, the base command and its other options are still available and not deprecated. For more details and related changes in Kickstart, see the Kickstart changes section of the Considerations in adopting RHEL 8 document. (BZ#1642765)

The --interactive option of the ignoredisk Kickstart command has been deprecated
Using the --interactive option in future releases of Red Hat Enterprise Linux will result in a fatal installation error. It is recommended that you modify your Kickstart file to remove the option. (BZ#1637872)

5.6.2. Software management

rpmbuild --sign is deprecated
With this update, the rpmbuild --sign command has become deprecated. Using this command in future releases of Red Hat Enterprise Linux can result in an error. It is recommended that you use the rpmsign command instead. ( BZ#1688849 )

5.6.3. Shells and command-line tools

Metalink support for curl has been disabled
A flaw was found in curl functionality in the way it handles credentials and file hash mismatches for content downloaded using Metalink. This flaw allows malicious actors controlling a hosting server to:
Trick users into downloading malicious content
Gain unauthorized access to provided credentials without the user's knowledge
The highest threat from this vulnerability is confidentiality and integrity. To avoid this, Metalink support for curl has been disabled starting with Red Hat Enterprise Linux 8.2.0.z. As a workaround, after the Metalink file is downloaded, fetch the referenced content directly and verify it manually; one possible manual check is sketched after this entry. (BZ#1999620)
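A rough sketch of one manual verification approach, assuming the Metalink file lists a SHA-256 hash for the content; the URL, the file names, and the hash-extraction step are illustrative assumptions.

# Download the referenced file directly (URL and file names are placeholders).
curl -LO https://example.com/pub/example.iso

# Compute the local checksum.
sha256sum example.iso

# Compare it with the sha-256 value listed in the downloaded Metalink file.
grep -A 1 'type="sha-256"' example.iso.meta4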
5.6.4. Security

NSS SEED ciphers are deprecated
The Mozilla Network Security Services ( NSS ) library will not support TLS cipher suites that use a SEED cipher in a future release. For deployments that rely on SEED ciphers, Red Hat recommends enabling support for other cipher suites. This way, you ensure a smooth transition when NSS removes support for them. Note that the SEED ciphers are already disabled by default in RHEL. ( BZ#1817533 )

TLS 1.0 and TLS 1.1 are deprecated
The TLS 1.0 and TLS 1.1 protocols are disabled in the DEFAULT system-wide cryptographic policy level. If your scenario, for example, a video conferencing application in the Firefox web browser, requires using the deprecated protocols, switch the system-wide cryptographic policy to the LEGACY level, for example with the update-crypto-policies --set LEGACY command. For more information, see the Strong crypto defaults in RHEL 8 and deprecation of weak crypto algorithms Knowledgebase article on the Red Hat Customer Portal and the update-crypto-policies(8) man page. ( BZ#1660839 )

DSA is deprecated in RHEL 8
The Digital Signature Algorithm (DSA) is considered deprecated in Red Hat Enterprise Linux 8. Authentication mechanisms that depend on DSA keys do not work in the default configuration. Note that OpenSSH clients do not accept DSA host keys even in the LEGACY system-wide cryptographic policy level. (BZ#1646541)

SSL2 Client Hello has been deprecated in NSS
The Transport Layer Security ( TLS ) protocol version 1.2 and earlier allow starting a negotiation with a Client Hello message formatted in a way that is backward compatible with the Secure Sockets Layer ( SSL ) protocol version 2. Support for this feature in the Network Security Services ( NSS ) library has been deprecated and it is disabled by default. Applications that require support for this feature need to use the new SSL_ENABLE_V2_COMPATIBLE_HELLO API to enable it. Support for this feature may be removed completely in future releases of Red Hat Enterprise Linux 8. (BZ#1645153)

TPM 1.2 is deprecated
The Trusted Platform Module (TPM) secure cryptoprocessor standard was updated to version 2.0 in 2016. TPM 2.0 provides many improvements over TPM 1.2, and it is not backward compatible with the earlier version. TPM 1.2 is deprecated in RHEL 8, and it might be removed in the next major release. (BZ#1657927)

5.6.5. Networking

Network scripts are deprecated in RHEL 8
Network scripts are deprecated in Red Hat Enterprise Linux 8 and they are no longer provided by default. The basic installation provides a new version of the ifup and ifdown scripts which call the NetworkManager service through the nmcli tool. In Red Hat Enterprise Linux 8, to run the ifup and the ifdown scripts, NetworkManager must be running. Note that custom commands in /sbin/ifup-local , ifdown-pre-local and ifdown-local scripts are not executed. If any of these scripts are required, the installation of the deprecated network scripts in the system is still possible with the yum install network-scripts command. The ifup and ifdown scripts link to the installed legacy network scripts. Calling the legacy network scripts shows a warning about their deprecation. (BZ#1647725)

5.6.6. Kernel

Installing RHEL for Real Time 8 using diskless boot is now deprecated
Diskless booting allows multiple systems to share a root file system via the network.
While convenient, diskless boot is prone to introducing network latency in realtime workloads. With a future minor update of RHEL for Real Time 8, the diskless booting feature will no longer be supported. ( BZ#1748980 ) The qla3xxx driver is deprecated The qla3xxx driver has been deprecated in RHEL 8. The driver will likely not be supported in future major releases of this product, and thus it is not recommended for new deployments. (BZ#1658840) The dl2k , dnet , ethoc , and dlci drivers are deprecated The dl2k , dnet , ethoc , and dlci drivers have been deprecated in RHEL 8. The drivers will likely not be supported in future major releases of this product, and thus they are not recommended for new deployments. (BZ#1660627) The rdma_rxe Soft-RoCE driver is deprecated Software Remote Direct Memory Access over Converged Ethernet (Soft-RoCE), also known as RXE, is a feature that emulates Remote Direct Memory Access (RDMA). In RHEL 8, the Soft-RoCE feature is available as an unsupported Technology Preview. However, due to stability issues, this feature has been deprecated and will be removed in RHEL 9. (BZ#1878207) 5.6.7. File systems and storage The elevator kernel command line parameter is deprecated The elevator kernel command line parameter was used in earlier RHEL releases to set the disk scheduler for all devices. In RHEL 8, the parameter is deprecated. The upstream Linux kernel has removed support for the elevator parameter, but it is still available in RHEL 8 for compatibility reasons. Note that the kernel selects a default disk scheduler based on the type of device. This is typically the optimal setting. If you require a different scheduler, Red Hat recommends that you use udev rules or the Tuned service to configure it. Match the selected devices and switch the scheduler only for those devices. For more information, see Setting the disk scheduler . (BZ#1665295) LVM mirror is deprecated The LVM mirror segment type is now deprecated. Support for mirror will be removed in a future major release of RHEL. Red Hat recommends that you use LVM RAID 1 devices with a segment type of raid1 instead of mirror . The raid1 segment type is the default RAID configuration type and replaces mirror as the recommended solution. To convert mirror devices to raid1 , see Converting a mirrored LVM device to a RAID1 logical volume . LVM mirror has several known issues. For details, see known issues in file systems and storage . (BZ#1827628) NFSv3 over UDP has been disabled The NFS server no longer opens or listens on a User Datagram Protocol (UDP) socket by default. This change affects only NFS version 3 because version 4 requires the Transmission Control Protocol (TCP). NFS over UDP is no longer supported in RHEL 8. (BZ#1592011) 5.6.8. Identity Management The SMB1 protocol is deprecated in Samba Starting with Samba 4.11, the insecure Server Message Block version 1 (SMB1) protocol is deprecated and will be removed in a future release. To improve the security, by default, SMB1 is disabled in the Samba server and client utilities. (JIRA:RHELDOCS-16612) 5.6.9. Desktop The libgnome-keyring library has been deprecated The libgnome-keyring library has been deprecated in favor of the libsecret library, as libgnome-keyring is not maintained upstream, and does not follow the necessary cryptographic policies for RHEL. The new libsecret library is the replacement that follows the necessary security standards. (BZ#1607766) 5.6.10. 
Graphics infrastructures

AGP graphics cards are no longer supported
Graphics cards using the Accelerated Graphics Port (AGP) bus are not supported in Red Hat Enterprise Linux 8. Use graphics cards with the PCI Express bus as the recommended replacement. (BZ#1569610)

5.6.11. The web console

The web console no longer supports incomplete translations
The RHEL web console no longer provides translations for languages that have translations available for less than 50% of the Console's translatable strings. If the browser requests translation to such a language, the user interface will be in English instead. ( BZ#1666722 )

5.6.12. Virtualization

virt-manager has been deprecated
The Virtual Machine Manager application, also known as virt-manager , has been deprecated. The RHEL 8 web console, also known as Cockpit , is intended to become its replacement in a subsequent release. It is, therefore, recommended that you use the web console for managing virtualization in a GUI. Note, however, that some features available in virt-manager may not yet be available in the RHEL 8 web console. (JIRA:RHELPLAN-10304)

Virtual machine snapshots are not properly supported in RHEL 8
The current mechanism of creating virtual machine (VM) snapshots has been deprecated because it does not work reliably. As a consequence, it is recommended not to use VM snapshots in RHEL 8. Note that a new VM snapshot mechanism is under development and will be fully implemented in a future minor release of RHEL 8. ( BZ#1686057 )

The Cirrus VGA virtual GPU type has been deprecated
With a future major update of Red Hat Enterprise Linux, the Cirrus VGA GPU device will no longer be supported in KVM virtual machines. Therefore, Red Hat recommends using the stdvga , virtio-vga , or qxl devices instead of Cirrus VGA. (BZ#1651994)

The cpu64-rhel6 CPU model has been deprecated and removed
The cpu64-rhel6 QEMU virtual CPU model has been deprecated in RHEL 8.1, and has been removed from RHEL 8.2. It is recommended that you use the other CPU models provided by QEMU and libvirt, according to the CPU present on the host machine. (BZ#1741346)

5.6.13. Deprecated packages

The following packages have been deprecated and will probably not be included in a future major release of Red Hat Enterprise Linux:
389-ds-base-legacy-tools
authd
custodia
hostname
libidn
net-tools
network-scripts
nss-pam-ldapd
sendmail
yp-tools
ypbind
ypserv

5.7. Known issues

This part describes known issues in Red Hat Enterprise Linux 8.2.

5.7.1. Installer and image creation

The auth and authconfig Kickstart commands require the AppStream repository
The authselect-compat package is required by the auth and authconfig Kickstart commands during installation. Without this package, the installation fails if auth or authconfig are used. However, by design, the authselect-compat package is only available in the AppStream repository. To work around this problem, verify that the BaseOS and AppStream repositories are available to the installer or use the authselect Kickstart command during installation. (BZ#1640697)

The reboot --kexec and inst.kexec commands do not provide a predictable system state
Performing a RHEL installation with the reboot --kexec Kickstart command or the inst.kexec kernel boot parameters does not provide the same predictable system state as a full reboot. As a consequence, switching to the installed system without rebooting can produce unpredictable results. Note that the kexec feature is deprecated and will be removed in a future release of Red Hat Enterprise Linux.
(BZ#1697896)

Anaconda installation includes low limits of minimal resources setting requirements
Anaconda initiates the installation on systems that have less than the required minimal resources available and does not provide a warning message about the resources required to perform the installation successfully. As a result, the installation can fail, and the error output does not provide clear messages for debugging and recovery. To work around this problem, make sure that the system has the minimal resources required for installation: 2 GB of memory on PPC64(LE) and 1 GB on x86_64. As a result, it should be possible to perform a successful installation. (BZ#1696609)

Installation fails when using the reboot --kexec command
The RHEL 8 installation fails when using a Kickstart file that contains the reboot --kexec command. To avoid the problem, use the reboot command instead of reboot --kexec in your Kickstart file. ( BZ#1672405 )

RHEL 8 initial setup cannot be performed via SSH
Currently, the RHEL 8 initial setup interface does not display when logged in to the system using SSH. As a consequence, it is impossible to perform the initial setup on a RHEL 8 machine managed via SSH. To work around this problem, perform the initial setup in the main system console (ttyS0) and, afterwards, log in using SSH. ( BZ#1676439 )

Network access is not enabled by default in the installation program
Several installation features require network access, for example, registration of a system using the Content Delivery Network (CDN), NTP server support, and network installation sources. However, network access is not enabled by default, and as a result, these features cannot be used until network access is enabled. To work around this problem, add ip=dhcp to boot options to enable network access when the installation starts. Optionally, passing a Kickstart file or a repository located on the network using boot options also resolves the problem. As a result, the network-based installation features can be used. (BZ#1757877)

Registration fails for user accounts that belong to multiple organizations
Currently, when you attempt to register a system with a user account that belongs to multiple organizations, the registration process fails with the error message You must specify an organization for new units . To work around this problem, you can either:
Use a different user account that does not belong to multiple organizations.
Use the Activation Key authentication method available in the Connect to Red Hat feature for GUI and Kickstart installations.
Skip the registration step in Connect to Red Hat and use Subscription Manager to register your system post-installation.
( BZ#1822880 )

A GUI installation using the Binary DVD ISO image can sometimes not proceed without CDN registration
When performing a GUI installation using the Binary DVD ISO image file, a race condition in the installer can sometimes prevent the installation from proceeding until you register the system using the Connect to Red Hat feature. To work around this problem, complete the following steps:
Select Installation Source from the Installation Summary window of the GUI installation.
Verify that Auto-detected installation media is selected.
Click Done to confirm the selection and return to the Installation Summary window.
Verify that Local Media is displayed as the Installation Source status in the Installation Summary window.
As a result, you can proceed with the installation without registering the system using the Connect to Red Hat feature.
(BZ#1823578) Copying the content of the Binary DVD.iso file to a partition omits the .treeinfo and .discinfo files During local installation, while copying the content of the RHEL 8 Binary DVD.iso image file to a partition, the * in the cp <path>/\* <mounted partition>/dir command fails to copy the .treeinfo and .discinfo files. These files are required for a successful installation. As a result, the BaseOS and AppStream repositories are not loaded, and a debug-related log message in the anaconda.log file is the only record of the problem. To work around the problem, copy the missing .treeinfo and .discinfo files to the partition. (BZ#1687747) Self-signed HTTPS server cannot be used in Kickstart installation Currently, the installer fails to install from a self-signed https server when the installation source is specified in the kickstart file and the --noverifyssl option is used: To work around this problem, append the inst.noverifyssl parameter to the kernel command line when starting the kickstart installation. For example: (BZ#1745064) GUI installation might fail if an attempt to unregister using the CDN is made before the repository refresh is completed In RHEL 8.2, when registering your system and attaching subscriptions using the Content Delivery Network (CDN), a refresh of the repository metadata is started by the GUI installation program. The refresh process is not part of the registration and subscription process, and as a consequence, the Unregister button is enabled in the Connect to Red Hat window. Depending on the network connection, the refresh process might take more than a minute to complete. If you click the Unregister button before the refresh process is completed, the GUI installation might fail as the unregister process removes the CDN repository files and the certificates required by the installation program to communicate with the CDN. To work around this problem, complete the following steps in the GUI installation after you have clicked the Register button in the Connect to Red Hat window: From the Connect to Red Hat window, click Done to return to the Installation Summary window. From the Installation Summary window, verify that the Installation Source and Software Selection status messages in italics are not displaying any processing information. When the Installation Source and Software Selection categories are ready, click Connect to Red Hat . Click the Unregister button. After performing these steps, you can safely unregister the system during the GUI installation. (BZ#1821192) 5.7.2. Subscription management syspurpose addons have no effect on the subscription-manager attach --auto output. In Red Hat Enterprise Linux 8, four attributes of the syspurpose command-line tool have been added: role , usage , service_level_agreement and addons . Currently, only role , usage and service_level_agreement affect the output of running the subscription-manager attach --auto command. Users who attempt to set values to the addons argument will not observe any effect on the subscriptions that are auto-attached. ( BZ#1687900 ) Data from multi-path storage devices is lost when installing RHEL using a Kickstart file Data from the multi-path storage devices that are attached to a host is lost when installing RHEL using a Kickstart file. This issue occurs because the installer fails to ignore the multi-path storage devices that you specify using ignoredisk --drives command. As a result, data on the devices is lost. 
To work around this problem, detach the devices before installation, or use ignoredisk --only-use command to specify the devices for installation. (BZ#1862131) 5.7.3. Shells and command-line tools Applications using Wayland protocol cannot be forwarded to remote display servers In Red Hat Enterprise Linux 8, most applications use the Wayland protocol by default instead of the X11 protocol. As a consequence, the ssh server cannot forward the applications that use the Wayland protocol but is able to forward the applications that use the X11 protocol to a remote display server. To work around this problem, set the environment variable GDK_BACKEND=x11 before starting the applications. As a result, the application can be forwarded to remote display servers. ( BZ#1686892 ) systemd-resolved.service fails to start on boot The systemd-resolved service occasionally fails to start on boot. If this happens, restart the service manually after the boot finishes by using the following command: However, the failure of systemd-resolved on boot does not impact any other services. (BZ#1640802) 5.7.4. Security Audit executable watches on symlinks do not work File monitoring provided by the -w option cannot directly track a path. It has to resolve the path to a device and an inode to make a comparison with the executed program. A watch monitoring an executable symlink monitors the device and an inode of the symlink itself instead of the program executed in memory, which is found from the resolution of the symlink. Even if the watch resolves the symlink to get the resulting executable program, the rule triggers on any multi-call binary called from a different symlink. This results in flooding logs with false positives. Consequently, Audit executable watches on symlinks do not work. To work around the problem, set up a watch for the resolved path of the program executable, and filter the resulting log messages using the last component listed in the comm= or proctitle= fields. (BZ#1846345) SELINUX=disabled in /etc/selinux/config does not work properly Disabling SELinux using the SELINUX=disabled option in the /etc/selinux/config results in a process in which the kernel boots with SELinux enabled and switches to disabled mode later in the boot process. This might cause memory leaks. To work around this problem, disable SELinux by adding the selinux=0 parameter to the kernel command line as described in the Changing SELinux modes at boot time section of the Using SELinux title if your scenario really requires to completely disable SELinux. (JIRA:RHELPLAN-34199) libselinux-python is available only through its module The libselinux-python package contains only Python 2 bindings for developing SELinux applications and it is used for backward compatibility. For this reason, libselinux-python is no longer available in the default RHEL 8 repositories through the dnf install libselinux-python command. To work around this problem, enable both the libselinux-python and python27 modules, and install the libselinux-python package and its dependencies with the following commands: Alternatively, install libselinux-python using its install profile with a single command: As a result, you can install libselinux-python using the respective module. (BZ#1666328) udica processes UBI 8 containers only when started with --env container=podman The Red Hat Universal Base Image 8 (UBI 8) containers set the container environment variable to the oci value instead of the podman value. 
This prevents the udica tool from analyzing a container JavaScript Object Notation (JSON) file. To work around this problem, start a UBI 8 container using a podman command with the --env container=podman parameter. As a result, udica can generate an SELinux policy for a UBI 8 container only when you use the described workaround. ( BZ#1763210 ) Removing the rpm-plugin-selinux package leads to removing all selinux-policy packages from the system Removing the rpm-plugin-selinux package disables SELinux on the machine. It also removes all selinux-policy packages from the system. Repeated installation of the rpm-plugin-selinux package then installs the selinux-policy-minimum SELinux policy, even if the selinux-policy-targeted policy was previously present on the system. However, the repeated installation does not update the SELinux configuration file to account for the change in policy. As a consequence, SELinux is disabled even upon reinstallation of the rpm-plugin-selinux package. To work around this problem: Enter the umount /sys/fs/selinux/ command. Manually install the missing selinux-policy-targeted package. Edit the /etc/selinux/config file so that the policy is equal to SELINUX=enforcing . Enter the command load_policy -i . As a result, SELinux is enabled and running the same policy as before. (BZ#1641631) SELinux prevents systemd-journal-gatewayd to call newfstatat() on shared memory files created by corosync SELinux policy does not contain a rule that allows the systemd-journal-gatewayd daemon to access files created by the corosync service. As a consequence, SELinux denies systemd-journal-gatewayd to call the newfstatat() function on shared memory files created by corosync . To work around this problem, create a local policy module with an allow rule which enables the described scenario. See the audit2allow(1) man page for more information on generating SELinux policy allow and dontaudit rules. As a result of the workaround, systemd-journal-gatewayd can call the function on shared memory files created by corosync with SELinux in enforcing mode. (BZ#1746398) SELinux prevents auditd to halt or power off the system The SELinux policy does not contain a rule that allows the Audit daemon to start a power_unit_file_t systemd unit. Consequently, auditd cannot halt or power off the system even when configured to do so in cases such as no space left on a logging disk partition. To work around this problem, create a custom SELinux policy module. As a result, auditd can properly halt or power off the system only if you apply the workaround. ( BZ#1826788 ) users can run sudo commands as locked users In systems where sudoers permissions are defined with the ALL keyword, sudo users with permissions can run sudo commands as users whose accounts are locked. Consequently, locked and expired accounts can still be used to execute commands. To work around this problem, enable the newly implemented runas_check_shell option together with proper settings of valid shells in /etc/shells . This prevents attackers from running commands under system accounts such as bin . (BZ#1786990) Negative effects of the default logging setup on performance The default logging environment setup might consume 4 GB of memory or even more and adjustments of rate-limit values are complex when systemd-journald is running with rsyslog . See the Negative effects of the RHEL default logging setup on performance and their mitigations Knowledgebase article for more information. 
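The following is a minimal sketch of where the systemd-journald rate-limit settings live; the values shown are illustrative only, and the right settings for a system that also runs rsyslog depend on your workload (see the Knowledgebase article above):

# /etc/systemd/journald.conf (illustrative values)
[Journal]
RateLimitIntervalSec=30s
RateLimitBurst=10000

# apply the change
systemctl restart systemd-journald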
(JIRA:RHELPLAN-10431) Parameter not known errors in the rsyslog output with config.enabled In the rsyslog output, an unexpected bug occurs in configuration processing errors using the config.enabled directive. As a consequence, parameter not known errors are displayed while using the config.enabled directive except for the include() statements. To work around this problem, set config.enabled=on or use include() statements. (BZ#1659383) Certain rsyslog priority strings do not work correctly Support for the GnuTLS priority string for imtcp that allows fine-grained control over encryption is not complete. Consequently, the following priority strings do not work properly in rsyslog : To work around this problem, use only correctly working priority strings: As a result, current configurations must be limited to the strings that work correctly. ( BZ#1679512 ) Connections to servers with SHA-1 signatures do not work with GnuTLS SHA-1 signatures in certificates are rejected by the GnuTLS secure communications library as insecure. Consequently, applications that use GnuTLS as a TLS backend cannot establish a TLS connection to peers that offer such certificates. This behavior is inconsistent with other system cryptographic libraries. To work around this problem, upgrade the server to use certificates signed with SHA-256 or stronger hash, or switch to the LEGACY policy. (BZ#1628553) TLS 1.3 does not work in NSS in FIPS mode TLS 1.3 is not supported on systems working in FIPS mode. As a result, connections that require TLS 1.3 for interoperability do not function on a system working in FIPS mode. To enable the connections, disable the system's FIPS mode or enable support for TLS 1.2 in the peer. ( BZ#1724250 ) OpenSSL incorrectly handles PKCS #11 tokens that does not support raw RSA or RSA-PSS signatures The OpenSSL library does not detect key-related capabilities of PKCS #11 tokens. Consequently, establishing a TLS connection fails when a signature is created with a token that does not support raw RSA or RSA-PSS signatures. To work around the problem, add the following lines after the .include line at the end of the crypto_policy section in the /etc/pki/tls/openssl.cnf file: As a result, a TLS connection can be established in the described scenario. ( BZ#1685470 ) OpenSSL generates a malformed status_request extension in the CertificateRequest message in TLS 1.3 OpenSSL servers send a malformed status_request extension in the CertificateRequest message if support for the status_request extension and client certificate-based authentication are enabled. In such case, OpenSSL does not interoperate with implementations compliant with the RFC 8446 protocol. As a result, clients that properly verify extensions in the CertificateRequest message abort connections with the OpenSSL server. To work around this problem, disable support for the TLS 1.3 protocol on either side of the connection or disable support for status_request on the OpenSSL server. This will prevent the server from sending malformed messages. ( BZ#1749068 ) ssh-keyscan cannot retrieve RSA keys of servers in FIPS mode The SHA-1 algorithm is disabled for RSA signatures in FIPS mode, which prevents the ssh-keyscan utility from retrieving RSA keys of servers operating in that mode. To work around this problem, use ECDSA keys instead, or retrieve the keys locally from the /etc/ssh/ssh_host_rsa_key.pub file on the server. 
( BZ#1744108 ) Libreswan does not work properly with seccomp=enabled on all configurations The set of allowed syscalls in the Libreswan SECCOMP support implementation is currently not complete. Consequently, when SECCOMP is enabled in the ipsec.conf file, the syscall filtering rejects even syscalls needed for the proper functioning of the pluto daemon; the daemon is killed, and the ipsec service is restarted. To work around this problem, set the seccomp= option back to the disabled state. SECCOMP support must remain disabled to run ipsec properly. ( BZ#1777474 ) Certain sets of interdependent rules in SSG can fail Remediation of SCAP Security Guide (SSG) rules in a benchmark can fail due to undefined ordering of rules and their dependencies. If two or more rules need to be executed in a particular order, for example, when one rule installs a component and another rule configures the same component, they can run in the wrong order and remediation reports an error. To work around this problem, run the remediation twice, and the second run fixes the dependent rules. ( BZ#1750755 ) SCAP Workbench fails to generate results-based remediations from tailored profiles The following error occurs when trying to generate results-based remediation roles from a customized profile using the SCAP Workbench tool: To work around this problem, use the oscap command with the --tailoring-file option. (BZ#1640715) Kickstart uses org_fedora_oscap instead of com_redhat_oscap in RHEL 8 The Kickstart references the Open Security Content Automation Protocol (OSCAP) Anaconda add-on as org_fedora_oscap instead of com_redhat_oscap which might cause confusion. That is done to preserve backward compatibility with Red Hat Enterprise Linux 7. (BZ#1665082) OSCAP Anaconda Addon does not install all packages in text mode The OSCAP Anaconda Addon plugin cannot modify the list of packages selected for installation by the system installer if the installation is running in text mode. Consequently, when a security policy profile is specified using Kickstart and the installation is running in text mode, any additional packages required by the security policy are not installed during installation. To work around this problem, either run the installation in graphical mode or specify all packages that are required by the security policy profile in the security policy in the %packages section in your Kickstart file. As a result, packages that are required by the security policy profile are not installed during RHEL installation without one of the described workarounds, and the installed system is not compliant with the given security policy profile. ( BZ#1674001 ) OSCAP Anaconda Addon does not correctly handle customized profiles The OSCAP Anaconda Addon plugin does not properly handle security profiles with customizations in separate files. Consequently, the customized profile is not available in the RHEL graphical installation even when you properly specify it in the corresponding Kickstart section. To work around this problem, follow the instructions in the Creating a single SCAP data stream from an original DS and a tailoring file Knowledgebase article. As a result of this workaround, you can use a customized SCAP profile in the RHEL graphical installation. (BZ#1691305) GnuTLS fails to resume current session with the NSS server When resuming a TLS (Transport Layer Security) 1.3 session, the GnuTLS client waits 60 milliseconds plus an estimated round trip time for the server to send session resumption data. 
If the server does not send the resumption data within this time, the client creates a new session instead of resuming the current session. This incurs no serious adverse effects except for a minor performance impact on a regular session negotiation. ( BZ#1677754 ) The oscap-ssh utility fails when scanning a remote system with --sudo When performing a Security Content Automation Protocol (SCAP) scan of a remote system using the oscap-ssh tool with the --sudo option, the oscap tool on the remote system saves scan result files and report files into a temporary directory as the root user. If the umask settings on the remote machine have been changed, oscap-ssh might not have access to these files. To work around this problem, modify the oscap-ssh tool as described in this solution "oscap-ssh --sudo" fails to retrieve the result files with "scp: ... : Permission denied" error . As a result, oscap saves the files as the target user, and oscap-ssh accesses the files normally. ( BZ#1803116 ) OpenSCAP produces false positives caused by removing blank lines from YAML multi-line strings When OpenSCAP generates Ansible remediations from a datastream, it removes blank lines from YAML multi-line strings. Because some Ansible remediations contain literal configuration file content, removing blank lines affects the corresponding remediations. This causes the openscap utility to fail the corresponding Open Vulnerability and Assessment Language (OVAL) checks, even though the blank lines do not have any effect. To work around this problem, check the rule descriptions and skip scan results that failed because of missing blank lines. Alternatively, use Bash remediations instead of Ansible remediations, because Bash remediations do not produce these false positive results. ( BZ#1795563 ) OSPP-based profiles are incompatible with GUI package groups. GNOME packages installed by the Server with GUI package group require the nfs-utils package that is not compliant with the Operating System Protection Profile (OSPP). As a consequence, selecting the Server with GUI package group during the installation of a system with OSPP or OSPP-based profiles, for example, Security Technical Implementation Guide (STIG), aborts the installation. If the OSPP-based profile is applied after the installation, the system is not bootable. To work around this problem, do not install the Server with GUI package group or any other groups that install GUI when using the OSPP profile and OSPP-based profiles. When you use the Server or Minimal Install package groups instead, the system installs without issues and works correctly. ( BZ#1787156 ) RHEL8 system with the Server with GUI package group cannot be remediated using the e8 profile Using the OpenSCAP Anaconda Add-on to harden the system on the Server With GUI package group with profiles that select rules from the Verify Integrity with RPM group requires an extreme amount of RAM on the system. This problem is caused by the OpenSCAP scanner; for more details see Scanning large numbers of files with OpenSCAP causes systems to run out of memory . As a consequence, the hardening of the system using the RHEL8 Essential Eight (e8) profile is not successful. To work around this problem, choose a smaller package group, for example, Server, and install additional packages that you require after the installation. As a result, the system will have a smaller number of packages, the scanning will require less memory, and therefore the system can be hardened automatically. 
(BZ#1816199) Scanning large numbers of files with OpenSCAP causes systems to run out of memory The OpenSCAP scanner stores all the collected results in the memory until the scan finishes. As a consequence, the system might run out of memory on systems with low RAM when scanning large numbers of files, for example from the large package groups Server with GUI and Workstation . To work around this problem, use smaller package groups, for example, Server and Minimal Install on systems with limited RAM. If you need to use large package groups, you can test whether your system has sufficient memory in a virtual or staging environment. Alternatively, you can tailor the scanning profile to deselect rules that involve recursion over the entire / filesystem: rpm_verify_hashes rpm_verify_permissions rpm_verify_ownership file_permissions_unauthorized_world_writable no_files_unowned_by_user dir_perms_world_writable_system_owned file_permissions_unauthorized_suid file_permissions_unauthorized_sgid file_permissions_ungroupowned dir_perms_world_writable_sticky_bits This will prevent OpenSCAP scan from causing the system to run out of memory. ( BZ#1824152 ) 5.7.5. Networking IPsec network traffic fails during IPsec offloading when GRO is disabled IPsec offloading is not expected to work when Generic Receive Offload (GRO) is disabled on the device. If IPsec offloading is configured on a network interface and GRO is disabled on that device, IPsec network traffic fails. To work around this problem, keep GRO enabled on the device. (BZ#1649647) iptables does not request module loading for commands that update a chain if the specified chain type is not known Note: This problem causes spurious errors with no functional implication when stopping the iptables systemd service if you are using the services default configuration. When setting a chain's policy with iptables-nft , the resulting update chain command sent to the kernel will fail if the associated kernel module is not loaded already. To work around the problem, use the following commands to cause the modules to load: + (BZ#1812666) Automatic loading of address family-specific LOG back end modules by the nft_compat module can hang When the nft_compat module loads address family-specific LOG target back ends while an operation on network namespaces ( netns ) happens in parallel, a lock collision can occur. As a consequence, loading the address family-specific LOG target back ends can hang. To work around the problem, manually load the relevant LOG target back ends, such as nf_log_ipv4.ko and nf_log_ipv6.ko , before executing the iptables-restore utility. As a result, loading the LOG target back ends does not hang. However, if the problem appears during the system boots, no workaround is available. Note that other services, such as libvirtd , also execute iptables commands, which can cause the problem to occur. (BZ#1757933) 5.7.6. Kernel Accidental patch removal causes huge_page_setup_helper.py to show error A patch that updates the huge_page_setup_helper.py script, was accidentally removed. Consequently, after executing the huge_page_setup_helper.py script, the following error message appears: To work around this problem, copy the huge_page_setup_helper.py script from RHEL 8.1 and install it to the /usr/bin/ directory: Download the libhugetlbfs-utils-2.21-3.el8.x86_64.rpm package from the RHEL-8.1.0 Installation Media or from the Red Hat Customer Portal . 
Execute the rpm2cpio command: The command extracts the huge_page_setup_helper.py script from the RHEL 8.1 RPM and saves it to the /usr/bin/ directory. As a result, the huge_page_setup_helper.py script works correctly. (BZ#1823398) Systems with a large amount of persistent memory experience delays during the boot process Systems with a large amount of persistent memory take a long time to boot because the initialization of the memory is serialized. Consequently, if there are persistent memory file systems listed in the /etc/fstab file, the system might timeout while waiting for devices to become available. To work around this problem, configure the DefaultTimeoutStartSec option in the /etc/systemd/system.conf file to a sufficiently large value. (BZ#1666538) KSM sometimes ignores NUMA memory policies When the kernel shared memory (KSM) feature is enabled with the merge_across_nodes=1 parameter, KSM ignores memory policies set by the mbind() function, and may merge pages from some memory areas to Non-Uniform Memory Access (NUMA) nodes that do not match the policies. To work around this problem, disable KSM or set the merge_across_nodes parameter to 0 if using NUMA memory binding with QEMU. As a result, NUMA memory policies configured for the KVM VM will work as expected. (BZ#1153521) Debug kernel fails to boot in crash capture environment in RHEL 8 Due to memory-demanding nature of the debug kernel, a problem occurs when the debug kernel is in use and a kernel panic is triggered. As a consequence, the debug kernel is not able to boot as the capture kernel, and a stack trace is generated instead. To work around this problem, increase the crash kernel memory accordingly. As a result, the debug kernel successfully boots in the crash capture environment. (BZ#1659609) zlib may slow down a vmcore capture in some compression functions The kdump configuration file uses the lzo compression format ( makedumpfile -l ) by default. When you modify the configuration file using the zlib compression format, ( makedumpfile -c ) it is likely to bring a better compression factor at the expense of slowing down the vmcore capture process. As a consequence, it takes the kdump upto four times longer to capture a vmcore with zlib , as compared to lzo . As a result, Red Hat recommends using the default lzo for cases where speed is the main driving factor. However, if the target machine is low on available space, zlib is a better option. (BZ#1790635) A vmcore capture fails after memory hot-plug or unplug operation After performing the memory hot-plug or hot-unplug operation, the event comes after updating the device tree which contains memory layout information. Thereby the makedumpfile utility tries to access a non-existent physical address. The problem appears if all of the following conditions meet: A little-endian variant of IBM Power System runs RHEL 8. The kdump or fadump service is enabled on the system. Consequently, the capture kernel fails to save vmcore if a kernel crash is triggered after the memory hot-plug or hot-unplug operation. To work around this problem, restart the kdump service after hot-plug or hot-unplug: As a result, vmcore is successfully saved in the described scenario. (BZ#1793389) The fadump dumping mechanism renames the network interface to kdump-<interface-name> When using firmware-assisted dump ( fadump ) to capture a vmcore and store it to a remote machine using SSH or NFS protocol, renames the network interface to kdump-<interface-name> . 
The renaming happens when the <interface-name> is generic, for example, *eth#, or net# and so on. This problem occurs because the vmcore capture scripts in the initial RAM disk ( initrd ) add the kdump- prefix to the network interface name to secure persistent naming. Since the same initrd is also used for a regular boot, the interface name is changed for the production kernel too. (BZ#1745507) The system enters the emergency mode at boot-time when fadump is enabled The system enters the emergency mode when fadump ( kdump ) or dracut squash module is enabled in the initramfs scheme because systemd manager fails to fetch the mount information and configure the LV partition to mount. To work around this problem, add the following kernel command line parameter rd.lvm.lv=<VG>/<LV> to discover and mount the failed LV partition appropriately. As a result, the system will boot successfully in the described scenario. (BZ#1750278) Using irqpoll causes vmcore generation failure Due to an existing problem with the nvme driver on the 64-bit ARM architectures that run on the Amazon Web Services (AWS) cloud platforms, the vmcore generation fails when you provide the irqpoll kernel command line parameter to the first kernel. Consequently, no vmcore file is dumped in the /var/crash/ directory after a kernel crash. To work around this problem: Add irqpoll to the KDUMP_COMMANDLINE_REMOVE key in the /etc/sysconfig/kdump file. Restart the kdump service by running the systemctl restart kdump command. As a result, the first kernel boots correctly and the vmcore file is expected to be captured upon the kernel crash. Note that the kdump service can use a significant amount of crash kernel memory to dump the vmcore file. Ensure that the capture kernel has sufficient memory available for the kdump service. (BZ#1654962) Using vPMEM memory as dump target delays the kernel crash capture process When you use Virtual Persistent Memory (vPEM) namespaces as kdump or fadump target, the papr_scm module is forced to unmap and remap the memory backed by vPMEM and re-add the memory to its linear map. Consequently, this behavior triggers Hypervisor Calls (HCalls) to the POWER Hypervisor, and the total time taken, slows the capture kernel boot considerably. Therefore, it is recommended not to use vPMEM namespaces as a dump target for kdump or fadump. If you must use vPMEM, to work around this problem execute the following commands: Create the /etc/dracut.conf.d/99-pmem-workaround.conf file and add: Rebuild the initial RAM disk (initrd) file system: (BZ#1792125) The HP NMI watchdog does not always generate a crash dump In certain cases, the hpwdt driver for the HP NMI watchdog is not able to claim a non-maskable interrupt (NMI) generated by the HPE watchdog timer because the NMI was instead consumed by the perfmon driver. The missing NMI is initiated by one of two conditions: The Generate NMI button on the Integrated Lights-Out (iLO) server management software. This button is triggered by a user. The hpwdt watchdog. The expiration by default sends an NMI to the server. Both sequences typically occur when the system is unresponsive. Under normal circumstances, the NMI handler for both these situations calls the kernel panic() function and if configured, the kdump service generates a vmcore file. Because of the missing NMI, however, kernel panic() is not called and vmcore is not collected. In the first case (1.), if the system was unresponsive, it remains so. 
To work around this scenario, use the virtual Power button to reset or power cycle the server. In the second case (2.), the missing NMI is followed 9 seconds later by a reset from the Automated System Recovery (ASR). The HPE Gen9 Server line experiences this problem in single-digit percentages. The Gen10 at an even smaller frequency. (BZ#1602962) The tuned-adm profile powersave command causes the system to become unresponsive Executing the tuned-adm profile powersave command leads to an unresponsive state of the Penguin Valkyrie 2000 2-socket systems with the older Thunderx (CN88xx) processors. Consequently, reboot the system to resume working. To work around this problem, avoid using the powersave profile if your system matches the mentioned specifications. (BZ#1609288) The cxgb4 driver causes crash in the kdump kernel The kdump kernel crashes while trying to save information in the vmcore file. Consequently, the cxgb4 driver prevents the kdump kernel from saving a core for later analysis. To work around this problem, add the novmcoredd parameter to the kdump kernel command line to allow saving core files. (BZ#1708456) Attempting to add ICE driver NIC port to a mode 5 ( balance-tlb ) bonding master interface might lead to failure Attempting to add ICE driver NIC port to a mode 5 (balance-tlb) bonding master interface might lead to a failure with an error Master 'bond0', Slave 'ens1f0': Error: Enslave failed . Consequently, you experience an intermittent failure to add the NIC port to the bonding master interface. To workaround this problem, attempt to retry adding the interface. (BZ#1791664) Attaching the Virtual Function to virtual machine with interface type='hostdev' might fails at times Attaching a Virtual Function (VF) to a virtual machine using an .XML file, following the Assignment with <interface type='hostdev'> method, might fail at times. This occurs because using the Assignment with <interface type='hostdev'> method prevents the VM from attaching to the VF NIC presented to this virtual machine. To workaround this problem, attach the VF to the VM using the .XML file using the Assignment with <hostdev> method. As a result, the virsh attach-device command succeeds without error. For more details about the difference between Assignment with <hostdev> and Assignment with <interface type='hostdev'> (SRIOV devices only), see PCI Passthrough of host network devices . (BZ#1792691) 5.7.7. File systems and storage The /boot file system cannot be placed on LVM You cannot place the /boot file system on an LVM logical volume. This limitation exists for the following reasons: On EFI systems, the EFI System Partition conventionally serves as the /boot file system. The uEFI standard requires a specific GPT partition type and a specific file system type for this partition. RHEL 8 uses the Boot Loader Specification (BLS) for system boot entries. This specification requires that the /boot file system is readable by the platform firmware. On EFI systems, the platform firmware can read only the /boot configuration defined by the uEFI standard. The support for LVM logical volumes in the GRUB 2 boot loader is incomplete. Red Hat does not plan to improve the support because the number of use cases for the feature is decreasing due to standards such as uEFI and BLS. Red Hat does not plan to support /boot on LVM. Instead, Red Hat provides tools for managing system snapshots and rollback that do not need the /boot file system to be placed on an LVM logical volume. 
(BZ#1496229) LVM no longer allows creating volume groups with mixed block sizes LVM utilities such as vgcreate or vgextend no longer allow you to create volume groups (VGs) where the physical volumes (PVs) have different logical block sizes. LVM has adopted this change because file systems fail to mount if you extend the underlying logical volume (LV) with a PV of a different block size. To re-enable creating VGs with mixed block sizes, set the allow_mixed_block_sizes=1 option in the lvm.conf file. ( BZ#1768536 ) DM Multipath might fail to start when too many LUNs are connected The multipathd service might time out and fail to start if too many logical units (LUNs) are connected to the system. The exact number of LUNs that causes the problem depends on several factors, including the number of devices, the response time of the storage array, the memory and CPU configuration, and system load. To work around the problem, increase the timeout value in the multipathd unit file: Open the multipathd unit in the unit editor: Enter the following configuration to override the timeout value: Red Hat recommends increasing the value to 300 from the default 90, but you can also test other values above 90. Save the file in the editor. Reload systemd units to apply the change: As a result, multipathd can now successfully start with a larger number of LUNs. (BZ#1797660) Limitations of LVM writecache The writecache LVM caching method has the following limitations, which are not present in the cache method: You cannot take a snapshot of a logical volume while the logical volume is using writecache . You cannot attach or detach writecache while a logical volume is active. When attaching writecache to an inactive logical volume, you must use a writecache block size that matches the existing file system block size. For details, see the lvmcache(7) man page. You cannot resize a logical volume while writecache is attached to it. You cannot use pvmove commands on devices that are used with writecache . You cannot use logical volumes with writecache in combination with thin pools or VDO. (JIRA:RHELPLAN-27987, BZ#1798631 , BZ#1808012) LVM mirror devices that store a LUKS volume sometimes become unresponsive Mirrored LVM devices with a segment type of mirror that store a LUKS volume might become unresponsive under certain conditions. The unresponsive devices reject all I/O operations. To work around the issue, Red Hat recommends that you use LVM RAID 1 devices with a segment type of raid1 instead of mirror if you need to stack LUKS volumes on top of resilient software-defined storage. The raid1 segment type is the default RAID configuration type and replaces mirror as the recommended solution. To convert mirror devices to raid1 , see Converting a mirrored LVM device to a RAID1 device . (BZ#1730502) An NFS 4.0 patch can result in reduced performance under an open-heavy workload Previously, a bug was fixed that, in some cases, could cause an NFS open operation to overlook the fact that a file had been removed or renamed on the server. However, the fix may cause slower performance with workloads that require many open operations. To work around this problem, it might help to use NFS version 4.1 or higher, which have been improved to grant delegations to clients in more cases, allowing clients to perform open operations locally, quickly, and safely. (BZ#1748451) 5.7.8. 
Dynamic programming languages, web and database servers getpwnam() might fail when called by a 32-bit application When a user of NIS uses a 32-bit application that calls the getpwnam() function, the call fails if the nss_nis.i686 package is missing. To work around this problem, manually install the missing package by using the yum install nss_nis.i686 command. ( BZ#1803161 ) nginx cannot load server certificates from hardware security tokens The nginx web server supports loading TLS private keys from hardware security tokens directly from PKCS#11 modules. However, it is currently impossible to load server certificates from hardware security tokens through the PKCS#11 URI. To work around this problem, store server certificates on the file system ( BZ#1668717 ) php-fpm causes SELinux AVC denials when php-opcache is installed with PHP 7.2 When the php-opcache package is installed, the FastCGI Process Manager ( php-fpm ) causes SELinux AVC denials. To work around this problem, change the default configuration in the /etc/php.d/10-opcache.ini file to the following: Note that this problem affects only the php:7.2 stream, not the php:7.3 one. ( BZ#1670386 ) The mod_wsgi package name is missing when being installed as a dependency With a change in mod_wsgi installation, described in BZ#1779705 , the python3-mod_wsgi package no longer provides the name mod_wsgi . When installing the mod_wsgi module, you must specify the full package name. This change causes problems with dependencies of third-party packages. If you try to install a third-party package that requires a dependency named mod_wsgi , an error similar to the following is returned: To work around this problem, choose one of the following: Rebuild the package (or ask the third-party vendor for a new build) to require the full package name python3-mod_wsgi . Create a meta package with the missing package name: Build your own empty meta package that provides the name mod_wsgi . Add the module_hotfixes=True line to the .repo configuration file of the repository that includes the meta package. Manually install python3-mod_wsgi . ( BZ#1829692 ) 5.7.9. Compilers and development tools Synthetic functions generated by GCC confuse SystemTap GCC optimization can generate synthetic functions for partially inlined copies of other functions. Tools such as SystemTap and GDB cannot distinguish these synthetic functions from real functions. As a consequence, SystemTap places probes on both synthetic and real function entry points and, thus, registers multiple probe hits for a single real function call. To work around this problem, modify SystemTap scripts to detect recursion and prevent placing of probes related to inlined partial functions. This example script can be modified this way: Note that this example script does not consider all possible scenarios, such as missed kprobes or kretprobes, or genuine intended recursion. (BZ#1169184) 5.7.10. Identity Management Changing /etc/nsswitch.conf requires a manual system reboot Any change to the /etc/nsswitch.conf file, for example running the authselect select profile_id command, requires a system reboot so that all relevant processes use the updated version of the /etc/nsswitch.conf file. If a system reboot is not possible, restart the service that joins your system to Active Directory, which is the System Security Services Daemon (SSSD) or winbind . 
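For example (a sketch; run the command that matches the service your deployment uses):

# for SSSD-based setups
systemctl restart sssd

# for winbind-based setups
systemctl restart winbind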
( BZ#1657295 ) SSSD returns incorrect LDAP group membership for local users when the files domain is enabled If the System Security Services Daemon (SSSD) serves users from the local files and the ldap_rfc2307_fallback_to_local_users attribute in the [domain/LDAP] section of the sssd.conf file is set to True, then the files provider does not include group memberships from other domains. As a consequence, if a local user is a member of an LDAP group, the id local_user command does not return the user's LDAP group membership. To work around this problem, disable the implicit files domain by adding to the [sssd] section in the /etc/sssd/sssd.conf file. As a result, id local_user returns correct LDAP group membership for local users. ( BZ#1652562 ) SSSD does not correctly handle multiple certificate matching rules with the same priority If a given certificate matches multiple certificate matching rules with the same priority, the System Security Services Daemon (SSSD) uses only one of the rules. As a workaround, use a single certificate matching rule whose LDAP filter consists of the filters of the individual rules concatenated with the | (or) operator. For examples of certificate matching rules, see the sss-certamp(5) man page. (BZ#1447945) Private groups fail to be created with auto_private_group = hybrid when multiple domains are defined Private groups fail to be created with the option auto_private_group = hybrid when multiple domains are defined and the hybrid option is used by any domain other than the first one. If an implicit files domain is defined along with an AD or LDAP domain in the sssd.conf file and is not marked as MPG_HYBRID , then SSSD fails to create a private group for a user who has uid=gid and the group with this gid does not exist in AD or LDAP. The sssd_nss responder checks for the value of the auto_private_groups option in the first domain only. As a consequence, in setups where multiple domains are configured, which includes the default setup on RHEL 8, the option auto_private_group has no effect. To work around this problem, set enable_files_domain = false in the sssd section of of sssd.conf . As a result, If the enable_files_domain option is set to false, then sssd does not add a domain with id_provider=files at the start of the list of active domains, and therefore this bug does not occur. (BZ#1754871) python-ply is not FIPS compatible The YACC module of the python-ply package uses the MD5 hashing algorithm to generate the fingerprint of a YACC signature. However, FIPS mode blocks the use of MD5, which is only allowed in non-security contexts. As a consequence, python-ply is not FIPS compatible. On a system in FIPS mode, all calls to ply.yacc.yacc() fail with the error message: The problem affects python-pycparser and some use cases of python-cffi . To work around this problem, modify the line 2966 of the file /usr/lib/python3.6/site-packages/ply/yacc.py , replacing sig = md5() with sig = md5(usedforsecurity=False) . As a result, python-ply can be used in FIPS mode. ( BZ#1747490 ) FreeRADIUS silently truncates Tunnel-Passwords longer than 249 characters If a Tunnel-Password is longer than 249 characters, the FreeRADIUS service silently truncates it. This may lead to unexpected password incompatibilities with other systems. To work around the problem, choose a password that is 249 characters or fewer. 
( BZ#1723362 ) Installing KRA fails if all KRA members are hidden replicas The ipa-kra-install utility fails on a cluster where the Key Recovery Authority (KRA) is already present if the first KRA instance is installed on a hidden replica. Consequently, you cannot add further KRA instances to the cluster. To work around this problem, unhide the hidden replica that has the KRA role before you add new KRA instances. You can hide it again when ipa-kra-install completes successfully. ( BZ#1816784 ) Directory Server warns about missing attributes in the schema if those attributes are used in a search filter If you set the nsslapd-verify-filter-schema parameter to warn-invalid , Directory Server processes search operations with attributes that are not defined in the schema and logs a warning. With this setting, Directory Server returns requested attributes in search results, regardless whether the attributes is defined in the schema or not. A future version of Directory Server will change the default setting of nsslapd-verify-filter-schema to enforce stricter checks. The new default will warn about attributes that are missing in the schema, and reject requests or return only partial results. ( BZ#1790259 ) ipa-healthcheck-0.4 does not obsolete older versions of ipa-healthcheck The Healthcheck tool has been split into two sub-packages: ipa-healthcheck and ipa-healthcheck-core . However, only the ipa-healthcheck-core sub-package is correctly set to obsolete older versions of ipa-healthcheck . As a result, updating Healthcheck only installs ipa-healthcheck-core and the ipa-healthcheck command does not work after the update. To work around this problem, install the ipa-healthcheck-0.4 sub-package manually using yum install ipa-healthcheck-0.4 . ( BZ#1852244 ) Potential risk when using the default value for ldap_id_use_start_tls option When using ldap:// without TLS for identity lookups, it can pose a risk for an attack vector. Particularly a man-in-the-middle (MITM) attack which could allow an attacker to impersonate a user by altering, for example, the UID or GID of an object returned in an LDAP search. Currently, the SSSD configuration option to enforce TLS, ldap_id_use_start_tls , defaults to false . Ensure that your setup operates in a trusted environment and decide if it is safe to use unencrypted communication for id_provider = ldap . Note id_provider = ad and id_provider = ipa are not affected as they use encrypted connections protected by SASL and GSSAPI. If it is not safe to use unencrypted communication, enforce TLS by setting the ldap_id_use_start_tls option to true in the /etc/sssd/sssd.conf file. The default behavior is planned to be changed in a future release of RHEL. (JIRA:RHELPLAN-155168) SSSD retrieves incomplete list of members if the group size exceeds 1500 members During the integration of SSSD with Active Directory, SSSD retrieves incomplete group member lists when the group size exceeds 1500 members. This issue occurs because Active Directory's MaxValRange policy, which restricts the number of members retrievable in a single query, is set to 1500 by default. To work around this problem, change the MaxValRange setting in Active Directory to accommodate larger group sizes. (JIRA:RHELDOCS-19603) 5.7.11. Desktop Limitations of the Wayland session With Red Hat Enterprise Linux 8, the GNOME environment and the GNOME Display Manager (GDM) use Wayland as the default session type instead of the X11 session, which was used with the major version of RHEL. 
The following features are currently unavailable or do not work as expected under Wayland : X11 configuration utilities, such as xrandr , do not work under Wayland due to its different approach to handling, resolutions, rotations, and layout. You can configure the display features using GNOME settings. Screen recording and remote desktop require applications to support the portal API on Wayland . Certain legacy applications do not support the portal API. Pointer accessibility is not available on Wayland . No clipboard manager is available. GNOME Shell on Wayland ignores keyboard grabs issued by most legacy X11 applications. You can enable an X11 application to issue keyboard grabs using the /org/gnome/mutter/wayland/xwayland-grab-access-rules GSettings key. By default, GNOME Shell on Wayland enables the following applications to issue keyboard grabs: GNOME Boxes Vinagre Xephyr virt-manager , virt-viewer , and remote-viewer vncviewer Wayland inside guest virtual machines (VMs) has stability and performance problems. RHEL automatically falls back to the X11 session when running in a VM. If you upgrade to RHEL 8 from a RHEL 7 system where you used the X11 GNOME session, your system continues to use X11 . The system also automatically falls back to X11 when the following graphics drivers are in use: The proprietary NVIDIA driver The cirrus driver The mga driver The aspeed driver You can disable the use of Wayland manually: To disable Wayland in GDM, set the WaylandEnable=false option in the /etc/gdm/custom.conf file. To disable Wayland in the GNOME session, select the legacy X11 option by using the cogwheel menu on the login screen after entering your login name. For more details on Wayland , see https://wayland.freedesktop.org/ . ( BZ#1797409 ) Drag-and-drop does not work between desktop and applications Due to a bug in the gnome-shell-extensions package, the drag-and-drop functionality does not currently work between desktop and applications. Support for this feature will be added back in a future release. ( BZ#1717947 ) Disabling flatpak repositories from Software Repositories is not possible Currently, it is not possible to disable or remove flatpak repositories in the Software Repositories tool in the GNOME Software utility. ( BZ#1668760 ) Generation 2 RHEL 8 virtual machines sometimes fail to boot on Hyper-V Server 2016 hosts When using RHEL 8 as the guest operating system on a virtual machine (VM) running on a Microsoft Hyper-V Server 2016 host, the VM in some cases fails to boot and returns to the GRUB boot menu. In addition, the following error is logged in the Hyper-V event log: This error occurs due to a UEFI firmware bug on the Hyper-V host. To work around this problem, use Hyper-V Server 2019 as the host. (BZ#1583445) System crash may result in fadump configuration loss This issue is observed on systems where firmware-assisted dump (fadump) is enabled, and the boot partition is located on a journaling file system such as XFS. A system crash might cause the boot loader to load an older initrd that does not have the dump capturing support enabled. Consequently, after recovery, the system does not capture the vmcore file, which results in fadump configuration loss. To work around this problem: If /boot is a separate partition, perform the following: Restart the kdump service Run the following commands as the root user, or using a user account with CAP_SYS_ADMIN rights: If /boot is not a separate partition, reboot the system. (BZ#1723501) 5.7.12. 
Graphics infrastructures Unable to run graphical applications using sudo command When trying to run graphical applications as a user with elevated privileges, the application fails to open with an error message. The failure happens because Xwayland is restricted by the Xauthority file to use regular user credentials for authentication. To work around this problem, use the sudo -E command to run graphical applications as a root user. ( BZ#1673073 ) radeon fails to reset hardware correctly The radeon kernel driver currently does not reset hardware in the kexec context correctly. Instead, radeon falls over, which causes the rest of the kdump service to fail. To work around this problem, blacklist radeon in kdump by adding the following line to the /etc/kdump.conf file: Restart the machine and kdump . After starting kdump , the force_rebuild 1 line may be removed from the configuration file. Note that in this scenario, no graphics will be available during kdump , but kdump will work successfully. (BZ#1694705) Multiple HDR displays on a single MST topology may not power on On systems using NVIDIA Turing GPUs with the nouveau driver, using a DisplayPort hub (such as a laptop dock) with multiple monitors which support HDR plugged into it may result in failure to turn on all displays despite having done so on RHEL releases. This is due to the system erroneously thinking there is not enough bandwidth on the hub to support all of the displays. (BZ#1812577) 5.7.13. The web console Unprivileged users can access the Subscriptions page If a non-administrator navigates to the Subscriptions page of the web console, the web console displays a generic error message Cockpit had an unexpected internal error . To work around this problem, sign in to the web console with a privileged user and make sure to check the Reuse my password for privileged tasks checkbox. ( BZ#1674337 ) 5.7.14. Virtualization Low GUI display performance in RHEL 8 virtual machines on a Windows Server 2019 host When using RHEL 8 as a guest operating system in graphical mode on a Windows Server 2019 host, the GUI display performance is low, and connecting to a console output of the guest currently takes significantly longer than expected. This is a known issue on Windows 2019 hosts and is pending a fix by Microsoft. To work around this problem, connect to the guest using SSH or use Windows Server 2016 as the host. (BZ#1706541) Displaying multiple monitors of virtual machines that use Wayland is not possible with QXL Using the remote-viewer utility to display more than one monitor of a virtual machine (VM) that is using the Wayland display server causes the VM to become unresponsive and the Waiting for display status message to be displayed indefinitely. To work around this problem, use virtio-gpu instead of qxl as the GPU device for VMs that use Wayland. (BZ#1642887) virsh iface-\* commands do not work consistently Currently, virsh iface-* commands, such as virsh iface-start and virsh iface-destroy , frequently fail due to configuration dependencies. Therefore, it is recommended not to use virsh iface-\* commands for configuring and managing host network connections. Instead, use the NetworkManager program and its related management applications. (BZ#1664592) RHEL 8 virtual machines sometimes cannot boot on Witherspoon hosts RHEL 8 virtual machines (VMs) that use the pseries-rhel7.6.0-sxxm machine type in some cases fail to boot on Power9 S922LC for HPC hosts (also known as Witherspoon) that use the DD2.2 or DD2.3 CPU. 
Attempting to boot such a VM instead generates the following error message: To work around this problem, configure the virtual machine's XML configuration as follows: ( BZ#1732726 , BZ#1751054 ) IBM POWER virtual machines do not work correctly with empty NUMA nodes Currently, when an IBM POWER virtual machine (VM) running on a RHEL 8 host is configured with a NUMA node that uses zero memory ( memory='0' ) and zero CPUs, the VM cannot start. Therefore, Red Hat strongly recommends not using IBM POWER VMs with such empty NUMA nodes on RHEL 8. (BZ#1651474) SMT CPU topology is not detected by VMs when using host passthrough mode on AMD EPYC When a virtual machine (VM) boots with the CPU host passthrough mode on an AMD EPYC host, the TOPOEXT CPU feature flag is not present. Consequently, the VM is not able to detect a virtual CPU topology with multiple threads per core. To work around this problem, boot the VM with the EPYC CPU model instead of host passthrough. ( BZ#1740002 ) Disk identifiers in RHEL 8.2 VMs may change on VM reboot. When using a virtual machine (VM) with RHEL 8.2 as the guest operating system on a Hyper-V hypervisor, the device identifiers for the VM's virtual disks in some cases change when the VM reboots. For example, a disk originally identified as /dev/sda may become /dev/sdb . As a consequence, the VM might fail to boot, and scripts that reference disks of the VM might stop working. To avoid this issue, Red Hat strongly recommends to set persistent names for the disks in the VM. For detailed information, see the Microsoft Azure documentation: https://docs.microsoft.com/en-us/azure/virtual-machines/troubleshooting/troubleshoot-device-names-problems . (BZ#1777283) Virtual machines sometimes fail to start when using many virtio-blk disks Adding a large number of virtio-blk devices to a virtual machine (VM) may exhaust the number of interrupt vectors available in the platform. If this occurs, the VM's guest OS fails to boot, and displays a dracut-initqueue[392]: Warning: Could not boot error. ( BZ#1719687 ) Attaching LUN devices to virtual machines using virtio-blk does not work The q35 machine type does not support transitional virtio 1.0 devices, and RHEL 8 therefore lacks support for features that were deprecated in virtio 1.0. In particular, it is not possible on a RHEL 8 host to send SCSI commands from virtio-blk devices. As a consequence, attaching a physical disk as a LUN device to a virtual machine fails when using the virtio-blk controller. Note that physical disks can still be passed through to the guest operating system, but they should be configured with the device='disk' option rather than device='lun' . (BZ#1777138) Migrating a POWER9 guest from a RHEL 7-ALT host to RHEL 8 fails Currently, migrating a POWER9 virtual machine from a RHEL 7-ALT host system to RHEL 8 becomes unresponsive with a "Migration status: active" status. To work around this problem, disable Transparent Huge Pages (THP) on the RHEL 7-ALT host, which enables the migration to complete successfully. (BZ#1741436) 5.7.15. Supportability redhat-support-tool does not work with the FUTURE crypto policy Because a cryptographic key used by a certificate on the Customer Portal API does not meet the requirements by the FUTURE system-wide cryptographic policy, the redhat-support-tool utility does not work with this policy level at the moment. To work around this problem, use the DEFAULT crypto policy while connecting to the Customer Portal API. ( BZ#1802026 ) 5.7.16. 
Containers UDICA is not expected to work with the 1.0 stable stream UDICA, the tool that generates SELinux policies for containers, is not expected to work with containers that are run via podman 1.0.x in the container-tools:1.0 module stream. (JIRA:RHELPLAN-25571) Notes on FIPS support with Podman The Federal Information Processing Standard (FIPS) requires certified modules to be used. Previously, Podman correctly installed certified modules in containers by enabling the proper flags at startup. However, in this release, Podman does not properly set up the additional application helpers normally provided by the system in the form of the FIPS system-wide crypto-policy. Although setting the system-wide crypto-policy is not required by the certified modules, it does improve the ability of applications to use crypto modules in compliant ways. To work around this problem, change your container to run the update-crypto-policies --set FIPS command before any other application code is executed. ( BZ#1804193 )
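A minimal sketch of one way to apply this workaround with an entrypoint wrapper script; the application path is a placeholder, and it assumes the crypto-policies-scripts package is available inside the container image:

#!/bin/sh
# entrypoint.sh: set the FIPS system-wide crypto policy, then start the application
update-crypto-policies --set FIPS
exec /usr/bin/my-app "$@"   # replace /usr/bin/my-app with your application command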
[ "C_CREATEREPOLIB: Critical: Cannot dump XML for PACKAGE_NAME (PACKAGE_SUM): Forbidden control chars found (ASCII values <32 except 9, 10 and 13)", "DECISION PERMISSION SUBJECT : OBJECT", "allow perm=open exe=/usr/bin/rpm : all", "perf stat -e cycles --per-die -a -- sleep 1 Performance counter stats for 'system wide': S0-D0 8 21,029,877 cycles S0-D1 8 19,192,372 cycles", "yum install python38 yum install python38-Cython", "python3.8 python3.8 -m cython --help", "yum install python3-mod_wsgi", "yum install python38-mod_wsgi", "yum module install maven:3.6", "podman pull registry.redhat.io/<image_name>", "yum install gcc-toolset-9", "scl enable gcc-toolset-9 tool", "scl enable gcc-toolset-9 bash", "yum module install rust-toolset", "yum module install llvm-toolset", "yum module install go-toolset", "yum install delve", "dlv debug helloworld.go", "server min protocol = NT1", "systemctl restart smb", "client min protocol = NT1", "systemctl restart smb", "dsconf -D \"cn=Directory Manager\" __ldap://server.example.com__ security set --tls-protocol-min TLS1.3", "echo 3 > /proc/sys/kernel/panic_print", "skip_if_unavailable=false", "postconf -e \"smtp_host_lookup = dns\" postconf -e \"smtp_dns_resolver_options = res_defnames, res_dnsrch\"", "postconf -e \"relayhost = [smtp]\"", "WARNING: AllowZoneDrifting is enabled. This is considered an insecure configuration option. It will be removed in a future release. Please consider disabling it now.", "WARNING: CPU: 38 PID: 928 at arch/x86/mm/init_64.c:850 add_pages+0x5c/0x60 [..] RIP: 0010:add_pages+0x5c/0x60 [..] Call Trace: devm_memremap_pages+0x460/0x6e0 pmem_attach_disk+0x29e/0x680 [nd_pmem] ? nd_dax_probe+0xfc/0x120 [libnvdimm] nvdimm_bus_probe+0x66/0x160 [libnvdimm]", "Error: \"-bash: echo:write error: Invalid argument\"", "acl \"trusted\" { 192.168.0.0/16; 10.153.154.0/24; localhost; localnets; };", "allow-query { any; }; allow-recursion { trusted; }; allow-query-cache { trusted; };", "echo 'add_dracutmodules+=\" network-manager \"' > /etc/dracut.conf.d/enable-nm.conf dracut -vf --regenerate-all", "xfs_info /mount-point | grep ftype", "<memtune> <hard_limit unit='KiB'>N</hard_limit> </memtune>", "wget --trust-server-names --input-metalink`", "wget --trust-server-names --input-metalink <(curl -s USDURL)", "update-crypto-policies --set LEGACY", "~]# yum install network-scripts", "url --url=https://SERVER/PATH --noverifyssl", "inst.ks=<URL> inst.noverifyssl", "systemctl start systemd-resolved", "dnf module enable libselinux-python dnf install libselinux-python", "dnf module install libselinux-python:2.8/common", "NONE:+VERS-ALL:-VERS-TLS1.3:+MAC-ALL:+DHE-RSA:+AES-256-GCM:+SIGN-RSA-SHA384:+COMP-ALL:+GROUP-ALL", "NONE:+VERS-ALL:-VERS-TLS1.3:+MAC-ALL:+ECDHE-RSA:+AES-128-CBC:+SIGN-RSA-SHA1:+COMP-ALL:+GROUP-ALL", "SignatureAlgorithms = RSA+SHA256:RSA+SHA512:RSA+SHA384:ECDSA+SHA256:ECDSA+SHA512:ECDSA+SHA384 MaxProtocol = TLSv1.2", "Error generating remediation role .../remediation.sh: Exit code of oscap was 1: [output truncated]", "iptables -t nat -n -L iptables -t mangle -n -L", "SyntaxError: Missing parentheses in call to 'print'", "rpm2cpio libhugetlbfs-utils-2.21-3.el8.x86_64.rpm | cpio -D / -iduv '*/huge_page_setup_helper.py'", "systemctl restart kdump.service", "add_drivers+=\"nd_pmem nd_btt libnvdimm papr_scm\"", "touch /etc/kdump.conf systemctl restart kdump.service", "systemctl edit multipathd", "[Service] TimeoutSec=300", "systemctl daemon-reload", "opcache.huge_code_pages=0", "Error: Problem: conflicting requests - nothing provides mod_wsgi 
needed by package-requires-mod_wsgi.el8.noarch", "probe kernel.function(\"can_nice\").call { }", "global in_can_nice% probe kernel.function(\"can_nice\").call { in_can_nice[tid()] ++; if (in_can_nice[tid()] > 1) { next } /* code for real probe handler */ } probe kernel.function(\"can_nice\").return { in_can_nice[tid()] --; }", "enable_files_domain=False", "UnboundLocalError: local variable 'sig' referenced before assignment", "The guest operating system reported that it failed with the following error code: 0x1E", "fsfreeze -f fsfreeze -u", "dracut_args --omit-drivers \"radeon\" force_rebuild 1", "qemu-kvm: Requested safe indirect branch capability level not supported by kvm", "<domain type='qemu' xmlns:qemu='http://libvirt.org/schemas/domain/qemu/1.0'> <qemu:commandline> <qemu:arg value='-machine'/> <qemu:arg value='cap-ibs=workaround'/> </qemu:commandline>" ]
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/8/html/8.2_release_notes/rhel-8-2-0-release
Chapter 64. SFTP Sink
Chapter 64. SFTP Sink Send data to an SFTP Server. The Kamelet expects the following headers to be set: file / ce-file : as the file name to upload. If the header is not set, the exchange ID is used as the file name. 64.1. Configuration Options The following table summarizes the configuration options available for the sftp-sink Kamelet: Property Name Description Type Default Example connectionHost * Connection Host Hostname of the SFTP server string connectionPort * Connection Port Port of the SFTP server string 22 directoryName * Directory Name The starting directory string password * Password The password to access the SFTP server string username * Username The username to access the SFTP server string fileExist File Existence How to behave if the file already exists. There are 4 enums and the value can be one of Override, Append, Fail or Ignore string "Override" passiveMode Passive Mode Sets passive mode connection boolean false Note Fields marked with an asterisk (*) are mandatory. Optional fields, such as fileExist, can be set in the same way as the mandatory ones; see the example at the end of this chapter. 64.2. Dependencies At runtime, the sftp-sink Kamelet relies upon the presence of the following dependencies: camel:ftp camel:core camel:kamelet 64.3. Usage This section describes how you can use the sftp-sink . 64.3.1. Knative Sink You can use the sftp-sink Kamelet as a Knative sink by binding it to a Knative object. sftp-sink-binding.yaml apiVersion: camel.apache.org/v1alpha1 kind: KameletBinding metadata: name: sftp-sink-binding spec: source: ref: kind: Channel apiVersion: messaging.knative.dev/v1 name: mychannel sink: ref: kind: Kamelet apiVersion: camel.apache.org/v1alpha1 name: sftp-sink properties: connectionHost: "The Connection Host" directoryName: "The Directory Name" password: "The Password" username: "The Username" 64.3.1.1. Prerequisite Make sure you have "Red Hat Integration - Camel K" installed in the OpenShift cluster you're connected to. 64.3.1.2. Procedure for using the cluster CLI Save the sftp-sink-binding.yaml file to your local drive, and then edit it as needed for your configuration. Run the sink by using the following command: oc apply -f sftp-sink-binding.yaml 64.3.1.3. Procedure for using the Kamel CLI Configure and run the sink by using the following command: kamel bind channel:mychannel sftp-sink -p "sink.connectionHost=The Connection Host" -p "sink.directoryName=The Directory Name" -p "sink.password=The Password" -p "sink.username=The Username" This command creates the KameletBinding in the current namespace on the cluster. 64.3.2. Kafka Sink You can use the sftp-sink Kamelet as a Kafka sink by binding it to a Kafka topic. sftp-sink-binding.yaml apiVersion: camel.apache.org/v1alpha1 kind: KameletBinding metadata: name: sftp-sink-binding spec: source: ref: kind: KafkaTopic apiVersion: kafka.strimzi.io/v1beta1 name: my-topic sink: ref: kind: Kamelet apiVersion: camel.apache.org/v1alpha1 name: sftp-sink properties: connectionHost: "The Connection Host" directoryName: "The Directory Name" password: "The Password" username: "The Username" 64.3.2.1. Prerequisites Ensure that you've installed the AMQ Streams operator in your OpenShift cluster and created a topic named my-topic in the current namespace. Also make sure you have "Red Hat Integration - Camel K" installed in the OpenShift cluster you're connected to. 64.3.2.2. Procedure for using the cluster CLI Save the sftp-sink-binding.yaml file to your local drive, and then edit it as needed for your configuration. Run the sink by using the following command: oc apply -f sftp-sink-binding.yaml 64.3.2.3.
Procedure for using the Kamel CLI Configure and run the sink by using the following command: kamel bind kafka.strimzi.io/v1beta1:KafkaTopic:my-topic sftp-sink -p "sink.connectionHost=The Connection Host" -p "sink.directoryName=The Directory Name" -p "sink.password=The Password" -p "sink.username=The Username" This command creates the KameletBinding in the current namespace on the cluster. 64.4. Kamelet source file https://github.com/openshift-integration/kamelet-catalog/sftp-sink.kamelet.yaml
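The optional properties from the configuration table, such as connectionPort and fileExist, are passed in exactly the same way as the mandatory ones. The following sketch extends the Knative example above; the port value 2222 and the Append behavior are illustrative placeholders only:
kamel bind channel:mychannel sftp-sink \
  -p "sink.connectionHost=The Connection Host" \
  -p "sink.directoryName=The Directory Name" \
  -p "sink.password=The Password" \
  -p "sink.username=The Username" \
  -p "sink.connectionPort=2222" \
  -p "sink.fileExist=Append"
With fileExist=Append, the sink appends to an existing file instead of overriding it, matching the enum values listed in the table.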
[ "apiVersion: camel.apache.org/v1alpha1 kind: KameletBinding metadata: name: sftp-sink-binding spec: source: ref: kind: Channel apiVersion: messaging.knative.dev/v1 name: mychannel sink: ref: kind: Kamelet apiVersion: camel.apache.org/v1alpha1 name: sftp-sink properties: connectionHost: \"The Connection Host\" directoryName: \"The Directory Name\" password: \"The Password\" username: \"The Username\"", "apply -f sftp-sink-binding.yaml", "kamel bind channel:mychannel sftp-sink -p \"sink.connectionHost=The Connection Host\" -p \"sink.directoryName=The Directory Name\" -p \"sink.password=The Password\" -p \"sink.username=The Username\"", "apiVersion: camel.apache.org/v1alpha1 kind: KameletBinding metadata: name: sftp-sink-binding spec: source: ref: kind: KafkaTopic apiVersion: kafka.strimzi.io/v1beta1 name: my-topic sink: ref: kind: Kamelet apiVersion: camel.apache.org/v1alpha1 name: sftp-sink properties: connectionHost: \"The Connection Host\" directoryName: \"The Directory Name\" password: \"The Password\" username: \"The Username\"", "apply -f sftp-sink-binding.yaml", "kamel bind kafka.strimzi.io/v1beta1:KafkaTopic:my-topic sftp-sink -p \"sink.connectionHost=The Connection Host\" -p \"sink.directoryName=The Directory Name\" -p \"sink.password=The Password\" -p \"sink.username=The Username\"" ]
https://docs.redhat.com/en/documentation/red_hat_build_of_apache_camel_k/1.10.9/html/kamelets_reference/sftp-sink
Chapter 6. Geo-replication
Chapter 6. Geo-replication Note Currently, the geo-replication feature is not supported on IBM Power. Geo-replication allows multiple, geographically distributed Red Hat Quay deployments to work as a single registry from the perspective of a client or user. It significantly improves push and pull performance in a globally-distributed Red Hat Quay setup. Image data is asynchronously replicated in the background with transparent failover and redirect for clients. Red Hat Quay deployments with geo-replication are supported on both standalone and Operator deployments. Additional resources For more information about the geo-replication feature's architecture, see the architecture guide , which includes technical diagrams and a high-level overview. 6.1. Geo-replication features When geo-replication is configured, container image pushes will be written to the preferred storage engine for that Red Hat Quay instance. This is typically the nearest storage backend within the region. After the initial push, image data will be replicated in the background to other storage engines. The list of replication locations is configurable and those can be different storage backends. An image pull will always use the closest available storage engine, to maximize pull performance. If replication has not been completed yet, the pull will use the source storage backend instead. 6.2. Geo-replication requirements and constraints In geo-replicated setups, Red Hat Quay requires that all regions are able to read and write to every other region's object storage. Object storage must be geographically accessible by all other regions. In case of an object storage system failure of one geo-replicating site, that site's Red Hat Quay deployment must be shut down so that clients are redirected to the remaining site with intact storage systems by a global load balancer. Otherwise, clients will experience pull and push failures. Red Hat Quay has no internal awareness of the health or availability of the connected object storage system. Users must configure a global load balancer (LB) to monitor the health of the distributed system and to route traffic to different sites based on their storage status. To check the status of your geo-replication deployment, you must use the /health/endtoend endpoint, which is used for global health monitoring; a minimal probe sketch appears at the end of this chapter. You must configure the redirect manually using the /health/endtoend endpoint. The /health/instance endpoint only checks local instance health. If the object storage system of one site becomes unavailable, there will be no automatic redirect to the remaining storage system, or systems, of the remaining site, or sites. Geo-replication is asynchronous. The permanent loss of a site incurs the loss of the data that has been saved in that site's object storage system but has not yet been replicated to the remaining sites at the time of failure. A single database, and therefore all metadata and Red Hat Quay configuration, is shared across all regions. Geo-replication does not replicate the database. In the event of an outage, Red Hat Quay with geo-replication enabled will not fail over to another database. A single Redis cache is shared across the entire Red Hat Quay setup and needs to be accessible by all Red Hat Quay pods. The exact same configuration should be used across all regions, with the exception of the storage backend, which can be configured explicitly using the QUAY_DISTRIBUTED_STORAGE_PREFERENCE environment variable. Geo-replication requires object storage in each region.
It does not work with local storage. Each region must be able to access every storage engine in each region, which requires a network path. Alternatively, the storage proxy option can be used. The entire storage backend, for example, all blobs, is replicated. Repository mirroring, by contrast, can be limited to a repository, or an image. All Red Hat Quay instances must share the same entrypoint, typically through a load balancer. All Red Hat Quay instances must have the same set of superusers, as they are defined inside the common configuration file. Geo-replication requires your Clair configuration to be set to unmanaged . An unmanaged Clair database allows the Red Hat Quay Operator to work in a geo-replicated environment, where multiple instances of the Red Hat Quay Operator must communicate with the same database. For more information, see Advanced Clair configuration . Geo-replication requires SSL/TLS certificates and keys. For more information, see Proof of concept deployment using SSL/TLS certificates . If the above requirements cannot be met, you should instead use two or more distinct Red Hat Quay deployments and take advantage of repository mirroring functions. 6.2.1. Setting up geo-replication on OpenShift Container Platform Use the following procedure to set up geo-replication on OpenShift Container Platform. Procedure Deploy a PostgreSQL instance for Red Hat Quay. Log in to the database by entering the following command: psql -U <username> -h <hostname> -p <port> -d <database_name> Create a database for Red Hat Quay named quay . For example: CREATE DATABASE quay; Enable the pg_trgm extension inside the database: \c quay; CREATE EXTENSION IF NOT EXISTS pg_trgm; Deploy a Redis instance: Note Deploying a Redis instance might be unnecessary if your cloud provider has its own service. Deploying a Redis instance is required if you are leveraging Builders. Deploy a VM for Redis Verify that it is accessible from the clusters where Red Hat Quay is running Port 6379/TCP must be open Run Redis inside the instance sudo dnf install -y podman podman run -d --name redis -p 6379:6379 redis Create two object storage backends, one for each cluster. Ideally, one object storage bucket will be close to the first, or primary, cluster, and the other will run closer to the second, or secondary, cluster. Deploy the clusters with the same config bundle, using environment variable overrides to select the appropriate storage backend for an individual cluster. Configure a load balancer to provide a single entry point to the clusters. 6.2.1.1. Configuring geo-replication for the Red Hat Quay on OpenShift Container Platform Use the following procedure to configure geo-replication for the Red Hat Quay on OpenShift Container Platform. Procedure Create a config.yaml file that is shared between clusters.
This config.yaml file contains the details for the common PostgreSQL, Redis and storage backends: Geo-replication config.yaml file SERVER_HOSTNAME: <georep.quayteam.org or any other name> 1 DB_CONNECTION_ARGS: autorollback: true threadlocals: true DB_URI: postgresql://postgres:[email protected]:5432/quay 2 BUILDLOGS_REDIS: host: 10.19.0.2 port: 6379 USER_EVENTS_REDIS: host: 10.19.0.2 port: 6379 DATABASE_SECRET_KEY: 0ce4f796-c295-415b-bf9d-b315114704b8 DISTRIBUTED_STORAGE_CONFIG: usstorage: - GoogleCloudStorage - access_key: GOOGQGPGVMASAAMQABCDEFG bucket_name: georep-test-bucket-0 secret_key: AYWfEaxX/u84XRA2vUX5C987654321 storage_path: /quaygcp eustorage: - GoogleCloudStorage - access_key: GOOGQGPGVMASAAMQWERTYUIOP bucket_name: georep-test-bucket-1 secret_key: AYWfEaxX/u84XRA2vUX5Cuj12345678 storage_path: /quaygcp DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: - usstorage - eustorage DISTRIBUTED_STORAGE_PREFERENCE: - usstorage - eustorage FEATURE_STORAGE_REPLICATION: true 1 A proper SERVER_HOSTNAME must be used for the route and must match the hostname of the global load balancer. 2 To retrieve the configuration file for a Clair instance deployed using the OpenShift Container Platform Operator, see Retrieving the Clair config . Create the configBundleSecret by entering the following command: $ oc create secret generic --from-file config.yaml=./config.yaml georep-config-bundle In each of the clusters, set the configBundleSecret and use the QUAY_DISTRIBUTED_STORAGE_PREFERENCE environment variable override to configure the appropriate storage for that cluster. For example: Note The config.yaml file between both deployments must match. If making a change to one cluster, it must also be changed in the other. US cluster QuayRegistry example apiVersion: quay.redhat.com/v1 kind: QuayRegistry metadata: name: example-registry namespace: quay-enterprise spec: configBundleSecret: georep-config-bundle components: - kind: objectstorage managed: false - kind: route managed: true - kind: tls managed: false - kind: postgres managed: false - kind: clairpostgres managed: false - kind: redis managed: false - kind: quay managed: true overrides: env: - name: QUAY_DISTRIBUTED_STORAGE_PREFERENCE value: usstorage - kind: mirror managed: true overrides: env: - name: QUAY_DISTRIBUTED_STORAGE_PREFERENCE value: usstorage Note Because SSL/TLS is unmanaged, and the route is managed, you must supply the certificates directly in the config bundle. For more information, see Configuring TLS and routes . European cluster apiVersion: quay.redhat.com/v1 kind: QuayRegistry metadata: name: example-registry namespace: quay-enterprise spec: configBundleSecret: georep-config-bundle components: - kind: objectstorage managed: false - kind: route managed: true - kind: tls managed: false - kind: postgres managed: false - kind: clairpostgres managed: false - kind: redis managed: false - kind: quay managed: true overrides: env: - name: QUAY_DISTRIBUTED_STORAGE_PREFERENCE value: eustorage - kind: mirror managed: true overrides: env: - name: QUAY_DISTRIBUTED_STORAGE_PREFERENCE value: eustorage Note Because SSL/TLS is unmanaged, and the route is managed, you must supply the certificates directly in the config bundle. For more information, see Configuring TLS and routes . 6.2.2. Mixed storage for geo-replication Red Hat Quay geo-replication supports the use of different and multiple replication targets, for example, using AWS S3 storage on public cloud and using Ceph storage on premise.
This complicates the key requirement of granting access to all storage backends from all Red Hat Quay pods and cluster nodes. As a result, it is recommended that you use the following: A VPN to prevent visibility of the internal storage, or A token pair that only allows access to the specified bucket used by Red Hat Quay This results in the public cloud instance of Red Hat Quay having access to on-premise storage, but the network will be encrypted, protected, and will use ACLs, thereby meeting security requirements. If you cannot implement these security measures, it might be preferable to deploy two distinct Red Hat Quay registries and to use repository mirroring as an alternative to geo-replication. 6.3. Upgrading a geo-replication deployment of Red Hat Quay on OpenShift Container Platform Use the following procedure to upgrade your geo-replicated Red Hat Quay on OpenShift Container Platform deployment. Important When upgrading a geo-replicated Red Hat Quay on OpenShift Container Platform deployment to the next y-stream release (for example, Red Hat Quay 3.7 to Red Hat Quay 3.8), you must stop operations before upgrading. There is intermittent downtime when upgrading from one y-stream release to the next. It is highly recommended to back up your Red Hat Quay on OpenShift Container Platform deployment before upgrading. Procedure This procedure assumes that you are running the Red Hat Quay registry on three or more systems. For this procedure, we will assume three systems named System A, System B, and System C . System A will serve as the primary system in which the Red Hat Quay Operator is deployed. On System B and System C, scale down your Red Hat Quay registry. This is done by disabling auto scaling and overriding the replica count for Red Hat Quay, mirror workers, and Clair if it is managed. Use the following quayregistry.yaml file as a reference: apiVersion: quay.redhat.com/v1 kind: QuayRegistry metadata: name: registry namespace: ns spec: components: ... - kind: horizontalpodautoscaler managed: false 1 - kind: quay managed: true overrides: 2 replicas: 0 - kind: clair managed: true overrides: replicas: 0 - kind: mirror managed: true overrides: replicas: 0 ... 1 Disable auto scaling of Quay , Clair and Mirroring workers 2 Set the replica count to 0 for components accessing the database and objectstorage Note You must keep the Red Hat Quay registry running on System A. Do not update the quayregistry.yaml file on System A. Wait for the registry-quay-app , registry-quay-mirror , and registry-clair-app pods to disappear. Enter the following command to check their status: oc get pods -n <quay-namespace> Example output quay-operator.v3.7.1-6f9d859bd-p5ftc 1/1 Running 0 12m quayregistry-clair-postgres-7487f5bd86-xnxpr 1/1 Running 1 (12m ago) 12m quayregistry-quay-app-upgrade-xq2v6 0/1 Completed 0 12m quayregistry-quay-redis-84f888776f-hhgms 1/1 Running 0 12m On System A, initiate a Red Hat Quay upgrade to the latest y-stream version. This is a manual process. For more information about upgrading installed Operators, see Upgrading installed Operators . For more information about Red Hat Quay upgrade paths, see Upgrading the Red Hat Quay Operator . After the new Red Hat Quay registry is installed, the necessary upgrades on the cluster are automatically completed. Afterwards, new Red Hat Quay pods are started with the latest y-stream version. Additionally, new Quay pods are scheduled and started.
Confirm that the update has properly worked by navigating to the Red Hat Quay UI: In the OpenShift console, navigate to Operators Installed Operators , and click the Registry Endpoint link. Important Do not execute the following step until the Red Hat Quay UI is available. Do not upgrade the Red Hat Quay registry on System B and on System C until the UI is available on System A. After you confirm that the update has worked properly on System A, initiate the Red Hat Quay upgrade on System B and on System C. The Operator upgrade results in an upgraded Red Hat Quay installation, and the pods are restarted. Note Because the database schema is correct for the new y-stream installation, the new pods on System B and on System C should quickly start. After updating, revert the changes made in step 1 of this procedure by removing overrides for the components. For example: apiVersion: quay.redhat.com/v1 kind: QuayRegistry metadata: name: registry namespace: ns spec: components: ... - kind: horizontalpodautoscaler managed: true 1 - kind: quay managed: true - kind: clair managed: true - kind: mirror managed: true ... 1 If the horizontalpodautoscaler resource was set to true before the upgrade procedure, or if you want Red Hat Quay to scale in case of a resource shortage, set it to true . 6.3.1. Removing a geo-replicated site from your Red Hat Quay on OpenShift Container Platform deployment By using the following procedure, Red Hat Quay administrators can remove sites in a geo-replicated setup. Prerequisites You are logged into OpenShift Container Platform. You have configured Red Hat Quay geo-replication with at least two sites, for example, usstorage and eustorage . Each site has its own Organization, Repository, and image tags. Procedure Sync the blobs between all of your defined sites by running the following command: $ python -m util.backfillreplication Warning Prior to removing storage engines from your Red Hat Quay config.yaml file, you must ensure that all blobs are synced between all defined sites. When running this command, replication jobs are created which are picked up by the replication worker. If there are blobs that need to be replicated, the script returns UUIDs of blobs that will be replicated. If you run this command multiple times, and the output from the return script is empty, it does not mean that the replication process is done; it means that there are no more blobs to be queued for replication. Customers should use appropriate judgement before proceeding, as the time replication takes depends on the number of blobs detected. Alternatively, you could use a third party cloud tool, such as Microsoft Azure, to check the synchronization status. This step must be completed before proceeding. In your Red Hat Quay config.yaml file for site usstorage , remove the DISTRIBUTED_STORAGE_CONFIG entry for the eustorage site. Enter the following command to identify your Quay application pods: $ oc get pod -n <quay_namespace> Example output quay390usstorage-quay-app-5779ddc886-2drh2 quay390eustorage-quay-app-66969cd859-n2ssm Enter the following command to open an interactive shell session in the usstorage pod: $ oc rsh quay390usstorage-quay-app-5779ddc886-2drh2 Enter the following command to permanently remove the eustorage site: Important The following action cannot be undone. Use with caution. sh-4.4$ python -m util.removelocation eustorage Example output WARNING: This is a destructive operation. Are you sure you want to remove eustorage from your storage locations?
[y/n] y Deleted placement 30 Deleted placement 31 Deleted placement 32 Deleted placement 33 Deleted location eustorage
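The probe referenced in Section 6.2 can be exercised manually to verify that the global load balancer will see each site as healthy. The hostnames below are placeholders for the routes of the individual sites, not values from this guide:
# Query the global health endpoint of each geo-replicated site.
# With -f, curl exits non-zero on an HTTP error, which a load balancer
# health check can treat as a signal to stop routing clients to that site.
curl -fsk https://us-quay.example.com/health/endtoend
curl -fsk https://eu-quay.example.com/health/endtoend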
[ "psql -U <username> -h <hostname> -p <port> -d <database_name>", "CREATE DATABASE quay;", "\\c quay; CREATE EXTENSION IF NOT EXISTS pg_trgm;", "sudo dnf install -y podman run -d --name redis -p 6379:6379 redis", "SERVER_HOSTNAME: <georep.quayteam.org or any other name> 1 DB_CONNECTION_ARGS: autorollback: true threadlocals: true DB_URI: postgresql://postgres:[email protected]:5432/quay 2 BUILDLOGS_REDIS: host: 10.19.0.2 port: 6379 USER_EVENTS_REDIS: host: 10.19.0.2 port: 6379 DATABASE_SECRET_KEY: 0ce4f796-c295-415b-bf9d-b315114704b8 DISTRIBUTED_STORAGE_CONFIG: usstorage: - GoogleCloudStorage - access_key: GOOGQGPGVMASAAMQABCDEFG bucket_name: georep-test-bucket-0 secret_key: AYWfEaxX/u84XRA2vUX5C987654321 storage_path: /quaygcp eustorage: - GoogleCloudStorage - access_key: GOOGQGPGVMASAAMQWERTYUIOP bucket_name: georep-test-bucket-1 secret_key: AYWfEaxX/u84XRA2vUX5Cuj12345678 storage_path: /quaygcp DISTRIBUTED_STORAGE_DEFAULT_LOCATIONS: - usstorage - eustorage DISTRIBUTED_STORAGE_PREFERENCE: - usstorage - eustorage FEATURE_STORAGE_REPLICATION: true", "oc create secret generic --from-file config.yaml=./config.yaml georep-config-bundle", "apiVersion: quay.redhat.com/v1 kind: QuayRegistry metadata: name: example-registry namespace: quay-enterprise spec: configBundleSecret: georep-config-bundle components: - kind: objectstorage managed: false - kind: route managed: true - kind: tls managed: false - kind: postgres managed: false - kind: clairpostgres managed: false - kind: redis managed: false - kind: quay managed: true overrides: env: - name: QUAY_DISTRIBUTED_STORAGE_PREFERENCE value: usstorage - kind: mirror managed: true overrides: env: - name: QUAY_DISTRIBUTED_STORAGE_PREFERENCE value: usstorage", "apiVersion: quay.redhat.com/v1 kind: QuayRegistry metadata: name: example-registry namespace: quay-enterprise spec: configBundleSecret: georep-config-bundle components: - kind: objectstorage managed: false - kind: route managed: true - kind: tls managed: false - kind: postgres managed: false - kind: clairpostgres managed: false - kind: redis managed: false - kind: quay managed: true overrides: env: - name: QUAY_DISTRIBUTED_STORAGE_PREFERENCE value: eustorage - kind: mirror managed: true overrides: env: - name: QUAY_DISTRIBUTED_STORAGE_PREFERENCE value: eustorage", "apiVersion: quay.redhat.com/v1 kind: QuayRegistry metadata: name: registry namespace: ns spec: components: ... - kind: horizontalpodautoscaler managed: false 1 - kind: quay managed: true overrides: 2 replicas: 0 - kind: clair managed: true overrides: replicas: 0 - kind: mirror managed: true overrides: replicas: 0 ...", "get pods -n <quay-namespace>", "quay-operator.v3.7.1-6f9d859bd-p5ftc 1/1 Running 0 12m quayregistry-clair-postgres-7487f5bd86-xnxpr 1/1 Running 1 (12m ago) 12m quayregistry-quay-app-upgrade-xq2v6 0/1 Completed 0 12m quayregistry-quay-redis-84f888776f-hhgms 1/1 Running 0 12m", "apiVersion: quay.redhat.com/v1 kind: QuayRegistry metadata: name: registry namespace: ns spec: components: ... - kind: horizontalpodautoscaler managed: true 1 - kind: quay managed: true - kind: clair managed: true - kind: mirror managed: true ...", "python -m util.backfillreplication", "oc get pod -n <quay_namespace>", "quay390usstorage-quay-app-5779ddc886-2drh2 quay390eustorage-quay-app-66969cd859-n2ssm", "oc rsh quay390usstorage-quay-app-5779ddc886-2drh2", "sh-4.4USD python -m util.removelocation eustorage", "WARNING: This is a destructive operation. Are you sure you want to remove eustorage from your storage locations? 
[y/n] y Deleted placement 30 Deleted placement 31 Deleted placement 32 Deleted placement 33 Deleted location eustorage" ]
https://docs.redhat.com/en/documentation/red_hat_quay/3.12/html/red_hat_quay_operator_features/georepl-intro
8.163. python-urlgrabber
8.163. python-urlgrabber 8.163.1. RHBA-2013:1117 - python-urlgrabber bug fix update Updated python-urlgrabber packages that fix one bug are now available. The python-urlgrabber package provides urlgrabber, a high-level url-fetching package for the Python programming language and a corresponding utility of the same name. The urlgrabber package allows Python scripts to fetch data using the HTTP and FTP protocols, as well as from a local file system. Bug Fix BZ#807030 Previously, a flaw in the source code resulted in a traceback error when users used the reposync command to synchronize a remote Yum repository to a local directory and the utime() system call returned an error. This update corrects the mistake in the source code, and traceback errors no longer occur in the described scenario. Users of python-urlgrabber are advised to upgrade to these updated packages, which fix this bug.
null
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/6.5_technical_notes/python-urlgrabber
Administration Guide
Administration Guide Red Hat Ceph Storage 4 Administration of Red Hat Ceph Storage Red Hat Ceph Storage Documentation Team
null
https://docs.redhat.com/en/documentation/red_hat_ceph_storage/4/html/administration_guide/index
Providing feedback on Red Hat documentation
Providing feedback on Red Hat documentation We appreciate your feedback on our documentation. Let us know how we can improve it. Use the Create Issue form in Red Hat Jira to provide your feedback. The Jira issue is created in the Red Hat Satellite Jira project, where you can track its progress. Prerequisites Ensure you have registered a Red Hat account . Procedure Click the following link: Create Issue . If Jira displays a login error, log in and proceed after you are redirected to the form. Complete the Summary and Description fields. In the Description field, include the documentation URL, chapter or section number, and a detailed description of the issue. Do not modify any other fields in the form. Click Create .
null
https://docs.redhat.com/en/documentation/red_hat_satellite/6.16/html/managing_configurations_by_using_puppet_integration/providing-feedback-on-red-hat-documentation_managing-configurations-puppet
Chapter 7. Backing OpenShift Container Platform applications with OpenShift Data Foundation
Chapter 7. Backing OpenShift Container Platform applications with OpenShift Data Foundation You cannot directly install OpenShift Data Foundation during the OpenShift Container Platform installation. However, you can install OpenShift Data Foundation on an existing OpenShift Container Platform by using the Operator Hub and then configure the OpenShift Container Platform applications to be backed by OpenShift Data Foundation. Prerequisites OpenShift Container Platform is installed and you have administrative access to OpenShift Web Console. OpenShift Data Foundation is installed and running in the openshift-storage namespace. Procedure In the OpenShift Web Console, perform one of the following: Click Workloads Deployments . In the Deployments page, you can do one of the following: Select any existing deployment and click Add Storage option from the Action menu (...). Create a new deployment and then add storage. Click Create Deployment to create a new deployment. Edit the YAML based on your requirement to create a deployment. Click Create . Select Add Storage from the Actions drop-down menu on the top right of the page. Click Workloads Deployment Configs . In the Deployment Configs page, you can do one of the following: Select any existing deployment and click Add Storage option from the Action menu (...). Create a new deployment and then add storage. Click Create Deployment Config to create a new deployment. Edit the YAML based on your requirement to create a deployment. Click Create . Select Add Storage from the Actions drop-down menu on the top right of the page. In the Add Storage page, you can choose one of the following options: Click the Use existing claim option and select a suitable PVC from the drop-down list. Click the Create new claim option. Select the appropriate CephFS or RBD storage class from the Storage Class drop-down list. Provide a name for the Persistent Volume Claim. Select ReadWriteOnce (RWO) or ReadWriteMany (RWX) access mode. Note ReadOnlyMany (ROX) is deactivated as it is not supported. Select the size of the desired storage capacity. Note You can expand the block PVs but cannot reduce the storage capacity after the creation of Persistent Volume Claim. Specify the mount path and subpath (if required) for the mount path volume inside the container. Click Save . Verification steps Depending on your configuration, perform one of the following: Click Workloads Deployments . Click Workloads Deployment Configs . Set the Project as required. Click the deployment for which you added storage to display the deployment details. Scroll down to Volumes and verify that your deployment has a Type that matches the Persistent Volume Claim that you assigned. Click the Persistent Volume Claim name and verify the storage class name in the Persistent Volume Claim Overview page.
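For reference, the Create new claim option ultimately produces a PersistentVolumeClaim object similar to the sketch below. The claim name, capacity, and storage class name are assumptions for illustration; the storage classes actually offered depend on your OpenShift Data Foundation installation:
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: my-app-storage                          # assumed claim name
spec:
  accessModes:
    - ReadWriteOnce                             # RWO; choose ReadWriteMany (RWX) for shared access
  resources:
    requests:
      storage: 10Gi                             # assumed capacity; block PVs can be expanded later but not reduced
  storageClassName: ocs-storagecluster-ceph-rbd # assumed RBD storage class name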
null
https://docs.redhat.com/en/documentation/red_hat_openshift_data_foundation/4.9/html/deploying_and_managing_openshift_data_foundation_using_red_hat_openstack_platform/backing-openshift-container-platform-applications-with-openshift-data-foundation_osp
Serving models
Serving models Red Hat OpenShift AI Cloud Service 1 Serve models in Red Hat OpenShift AI Cloud Service
null
https://docs.redhat.com/en/documentation/red_hat_openshift_ai_cloud_service/1/html/serving_models/index
8.4. Configuration Examples
8.4. Configuration Examples 8.4.1. Squid Connecting to Non-Standard Ports The following example provides a real-world demonstration of how SELinux complements Squid by enforcing the above Boolean and by default only allowing access to certain ports. This example will then demonstrate how to change the Boolean and show that access is then allowed. Note that this is an example only and demonstrates how SELinux can affect a simple configuration of Squid. Comprehensive documentation of Squid is beyond the scope of this document. Refer to the official Squid documentation for further details. This example assumes that the Squid host has two network interfaces, Internet access, and that any firewall has been configured to allow access on the internal interface using the default TCP port on which Squid listens (TCP 3128). As the root user, install the squid package. Run the rpm -q squid command to see if the squid package is installed. If it is not installed, run the yum install squid command as the root user to install it. Edit the main configuration file, /etc/squid/squid.conf and confirm that the cache_dir directive is uncommented and looks similar to the following: This line specifies the default settings for the cache_dir directive to be used in this example; it consists of the Squid storage format (ufs), the directory on the system where the cache resides (/var/spool/squid), the amount of disk space in megabytes to be used for the cache (100), and finally the number of first-level and second-level cache directories to be created (16 and 256 respectively). In the same configuration file, make sure the http_access allow localnet directive is uncommented. This allows traffic from the localnet ACL which is automatically configured in a default installation of Squid on Red Hat Enterprise Linux. It will allow client machines on any existing RFC1918 network to have access through the proxy, which is sufficient for this simple example. In the same configuration file, make sure the visible_hostname directive is uncommented and is configured to the host name of the machine. The value should be the fully qualified domain name (FQDN) of the host: As the root user, run the service squid start command to start squid . As this is the first time squid has started, this command will initialise the cache directories as specified above in the cache_dir directive and will then start the squid daemon. The output is as follows if squid starts successfully: Confirm that the squid process ID (PID) has started as a confined service, as seen here by the squid_var_run_t value: At this point, a client machine connected to the localnet ACL configured earlier is successfully able to use the internal interface of this host as its proxy. This can be configured in the settings for all common web browsers, or system-wide. Squid is now listening on the default port of the target machine (TCP 3128), but the target machine will only allow outgoing connections to other services on the Internet via common ports. This is a policy defined by SELinux itself. SELinux will deny access to non-standard ports, as shown in the step: When a client makes a request using a non-standard port through the Squid proxy such as a website listening on TCP port 10000, a denial similar to the following is logged: To allow this access, the squid_connect_any Boolean must be modified, as it is disabled by default. 
To enable the squid_connect_any Boolean, run the following command as the root user: Note Do not use the -P option if you do not want setsebool changes to persist across reboots. The client will now be able to access non-standard ports on the Internet as Squid is now permitted to initiate connections to any port, on behalf of its clients.
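A quick way to confirm the Boolean change and re-test the scenario is sketched below. The proxy host name and the destination URL on TCP port 10000 are placeholders matching the example above, not required values:
# On the Squid host, confirm the current value of the Boolean.
getsebool squid_connect_any
# From a client, request a site on a non-standard port through the proxy.
# Before the Boolean is enabled, SELinux denies the connection; afterwards it succeeds.
curl -x http://squid.example.com:3128 http://www.example.com:10000/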
[ "cache_dir ufs /var/spool/squid 100 16 256", "visible_hostname squid.example.com", "~]# /sbin/service squid start init_cache_dir /var/spool/squid... Starting squid: . [ OK ]", "~]# ls -lZ /var/run/squid.pid -rw-r--r--. root squid unconfined_u:object_r: squid_var_run_t :s0 /var/run/squid.pid", "SELinux is preventing the squid daemon from connecting to network port 10000", "~]# setsebool -P squid_connect_any on" ]
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/managing_confined_services/sect-managing_confined_services-squid_caching_proxy-configuration_examples
1.2.3.2. Nessus
1.2.3.2. Nessus Nessus is a full-service security scanner. The plug-in architecture of Nessus allows users to customize it for their systems and networks. As with any scanner, Nessus is only as good as the signature database it relies upon. Fortunately, Nessus is frequently updated and features full reporting, host scanning, and real-time vulnerability searches. Remember that there could be false positives and false negatives, even in a tool as powerful and as frequently updated as Nessus. Note The Nessus client and server software requires a subscription to use. It has been included in this document as a reference to users who may be interested in using this popular application. For more information about Nessus, see the official website at the following URL: http://www.nessus.org/
null
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/security_guide/sect-security_guide-evaluating_the_tools-nessus
Chapter 2. Installing the OpenShift Serverless Operator
Chapter 2. Installing the OpenShift Serverless Operator Installing the OpenShift Serverless Operator enables you to install and use Knative Serving, Knative Eventing, and the Knative broker for Apache Kafka on an OpenShift Container Platform cluster. The OpenShift Serverless Operator manages Knative custom resource definitions (CRDs) for your cluster and enables you to configure them without directly modifying individual config maps for each component. 2.1. OpenShift Serverless Operator resource requirements The following sample setup might help you to estimate the minimum resource requirements for your OpenShift Serverless Operator installation. Your specific requirements might be significantly different, and might grow as your use of OpenShift Serverless increases. The test suite used in the sample setup has the following parameters: An OpenShift Container Platform cluster with: 10 workers (8 vCPU, 16GiB memory) 3 workers dedicated to Kafka 2 workers dedicated to Prometheus 5 workers remaining for both Serverless and test deployments 89 test scenarios running in parallel, mainly focused on using the control plane. Testing scenarios typically have a Knative Service sending events through an in-memory channel, a Kafka channel, an in-memory broker, or a Kafka broker to either a Deployment or a Knative Service. 48 re-creation scenarios, where the testing scenario is repeatedly being deleted and re-created. 41 stable scenarios, where events are sent throughout the test run slowly but continuously. The test setup contains, in total: 170 Knative Services 20 In-Memory Channels 24 Kafka Channels 52 Subscriptions 42 Brokers 68 Triggers The following table details the minimal resource requirements for a Highly-Available (HA) setup discovered by the test suite: Component RAM resources CPU resources OpenShift Serverless Operator 1GB 0.2 core Knative Serving 5GB 2.5 cores Knative Eventing 2GB 0.5 core Knative broker for Apache Kafka 6GB 1 core Sum 14GB 4.2 cores The following table details the minimal resource requirements for a non-HA setup discovered by the test suite: Component RAM resources CPU resources OpenShift Serverless Operator 1GB 0.2 core Knative Serving 2.5GB 1.2 cores Knative Eventing 1GB 0.2 core Knative broker for Apache Kafka 6GB 1 core Sum 10.5GB 2.6 cores 2.2. Installing the OpenShift Serverless Operator from the web console You can install the OpenShift Serverless Operator from the OperatorHub by using the OpenShift Container Platform web console. Installing this Operator enables you to install and use Knative components. Prerequisites You have cluster administrator permissions on OpenShift Container Platform, or you have cluster or dedicated administrator permissions on Red Hat OpenShift Service on AWS or OpenShift Dedicated. For OpenShift Container Platform, your cluster has the Marketplace capability enabled or the Red Hat Operator catalog source configured manually. You have logged in to the web console. Procedure In the web console, navigate to the Operators OperatorHub page. Scroll, or type the keyword Serverless into the Filter by keyword box to find the OpenShift Serverless Operator. Review the information about the Operator and click Install . On the Install Operator page: The Installation Mode is All namespaces on the cluster (default) . This mode installs the Operator in the default openshift-serverless namespace to watch and be made available to all namespaces in the cluster. The Installed Namespace is openshift-serverless . Select an Update Channel .
The stable channel enables installation of the latest stable release of the OpenShift Serverless Operator and is the default. To install another version, specify the corresponding stable-x.y channel, for example stable-1.29 . Select an Automatic or Manual approval strategy. Click Install to make the Operator available to the selected namespaces on this OpenShift Container Platform cluster. From the Catalog Operator Management page, you can monitor the OpenShift Serverless Operator subscription's installation and upgrade progress. If you selected a Manual approval strategy, the subscription's upgrade status will remain Upgrading until you review and approve its install plan. After approving on the Install Plan page, the subscription upgrade status moves to Up to date . If you selected an Automatic approval strategy, the upgrade status should resolve to Up to date without intervention. Verification After the Subscription's upgrade status is Up to date , select Catalog Installed Operators to verify that the OpenShift Serverless Operator eventually shows up and its Status ultimately resolves to InstallSucceeded in the relevant namespace. If it does not: Switch to the Catalog Operator Management page and inspect the Operator Subscriptions and Install Plans tabs for any failure or errors under Status . Check the logs in any pods in the openshift-serverless project on the Workloads Pods page that are reporting issues to troubleshoot further. Important If you want to use Red Hat OpenShift distributed tracing with OpenShift Serverless , you must install and configure Red Hat OpenShift distributed tracing before you install Knative Serving or Knative Eventing. 2.3. Installing the OpenShift Serverless Operator from the CLI You can install the OpenShift Serverless Operator from the OperatorHub by using the CLI. Installing this Operator enables you to install and use Knative components. Prerequisites You have cluster administrator permissions on OpenShift Container Platform, or you have cluster or dedicated administrator permissions on Red Hat OpenShift Service on AWS or OpenShift Dedicated. For OpenShift Container Platform, your cluster has the Marketplace capability enabled or the Red Hat Operator catalog source configured manually. You have logged in to the cluster. Procedure Create a YAML file containing Namespace , OperatorGroup , and Subscription objects to subscribe a namespace to the OpenShift Serverless Operator. For example, create the file serverless-subscription.yaml with the following content: Example subscription --- apiVersion: v1 kind: Namespace metadata: name: openshift-serverless --- apiVersion: operators.coreos.com/v1 kind: OperatorGroup metadata: name: serverless-operators namespace: openshift-serverless spec: {} --- apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: name: serverless-operator namespace: openshift-serverless spec: channel: stable 1 name: serverless-operator 2 source: redhat-operators 3 sourceNamespace: openshift-marketplace 4 1 The channel name of the Operator. The stable channel enables installation of the most recent stable version of the OpenShift Serverless Operator. To install another version, specify the corresponding stable-x.y channel, for example stable-1.29 . 2 The name of the Operator to subscribe to.
For the OpenShift Serverless Operator, this is always serverless-operator . 3 The name of the CatalogSource that provides the Operator. Use redhat-operators for the default OperatorHub catalog sources. 4 The namespace of the CatalogSource. Use openshift-marketplace for the default OperatorHub catalog sources. Create the Subscription object: Verification Check that the cluster service version (CSV) has reached the Succeeded phase: Example command $ oc get csv Example output NAME DISPLAY VERSION REPLACES PHASE serverless-operator.v1.25.0 Red Hat OpenShift Serverless 1.25.0 serverless-operator.v1.24.0 Succeeded Important If you want to use Red Hat OpenShift distributed tracing with OpenShift Serverless , you must install and configure Red Hat OpenShift distributed tracing before you install Knative Serving or Knative Eventing. 2.4. Global configuration The OpenShift Serverless Operator manages the global configuration of a Knative installation, including propagating values from the KnativeServing and KnativeEventing custom resources to system config maps . Any updates to config maps which are applied manually are overwritten by the Operator. However, modifying the Knative custom resources allows you to set values for these config maps. Knative has multiple config maps that are named with the prefix config- . All Knative config maps are created in the same namespace as the custom resource that they apply to. For example, if the KnativeServing custom resource is created in the knative-serving namespace, all Knative Serving config maps are also created in this namespace. The spec.config in the Knative custom resources has one <name> entry for each config map, named config-<name> , with a value that is used for the config map data (a minimal sketch appears at the end of this chapter). 2.5. Additional resources for OpenShift Container Platform Managing resources from custom resource definitions Understanding persistent storage Configuring a custom PKI 2.6. Next steps After the OpenShift Serverless Operator is installed, you can install Knative Serving or install Knative Eventing .
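The sketch below illustrates the spec.config mechanism from section 2.4: an entry under spec.config.<name> is propagated by the Operator into the config-<name> config map in the same namespace as the custom resource. The key and value shown are placeholders rather than recommended settings, and the apiVersion should be checked against the CRD installed by your OpenShift Serverless release:
apiVersion: operator.knative.dev/v1beta1
kind: KnativeServing
metadata:
  name: knative-serving
  namespace: knative-serving
spec:
  config:
    network:                       # propagated to the config-network config map
      example-key: "example-value" # placeholder entry, not a real Knative setting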
[ "--- apiVersion: v1 kind: Namespace metadata: name: openshift-serverless --- apiVersion: operators.coreos.com/v1 kind: OperatorGroup metadata: name: serverless-operators namespace: openshift-serverless spec: {} --- apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: name: serverless-operator namespace: openshift-serverless spec: channel: stable 1 name: serverless-operator 2 source: redhat-operators 3 sourceNamespace: openshift-marketplace 4", "oc apply -f serverless-subscription.yaml", "oc get csv", "NAME DISPLAY VERSION REPLACES PHASE serverless-operator.v1.25.0 Red Hat OpenShift Serverless 1.25.0 serverless-operator.v1.24.0 Succeeded" ]
https://docs.redhat.com/en/documentation/red_hat_openshift_serverless/1.35/html/installing_openshift_serverless/install-serverless-operator
Chapter 10. Migrating virtual machines from OpenShift Virtualization
Chapter 10. Migrating virtual machines from OpenShift Virtualization 10.1. Adding a Red Hat OpenShift Virtualization source provider You can use a Red Hat OpenShift Virtualization provider as both a source provider and destination provider. Specifically, the host cluster that is automatically added as an OpenShift Virtualization provider can be used as both a source provider and a destination provider. You can migrate VMs from the cluster that MTV is deployed on to another cluster, or from a remote cluster to the cluster that MTV is deployed on. Note The Red Hat OpenShift cluster version of the source provider must be 4.13 or later. Procedure In the Red Hat OpenShift web console, click Migration Providers for virtualization . Click Create Provider . Click OpenShift Virtualization . Specify the following fields: Provider resource name : Name of the source provider URL : URL of the endpoint of the API server Service account bearer token : Token for a service account with cluster-admin privileges If both URL and Service account bearer token are left blank, the local OpenShift cluster is used. Choose one of the following options for validating CA certificates: Use a custom CA certificate : Migrate after validating a custom CA certificate. Use the system CA certificate : Migrate after validating the system CA certificate. Skip certificate validation : Migrate without validating a CA certificate. To use a custom CA certificate, leave the Skip certificate validation switch toggled to the left, and either drag the CA certificate to the text box or browse for it and click Select . To use the system CA certificate, leave the Skip certificate validation switch toggled to the left, and leave the CA certificate text box empty. To skip certificate validation, toggle the Skip certificate validation switch to the right. Optional: Ask MTV to fetch a custom CA certificate from the provider's API endpoint URL. Click Fetch certificate from URL . The Verify certificate window opens. If the details are correct, select the I trust the authenticity of this certificate checkbox, and then click Confirm . If not, click Cancel , and then enter the correct certificate information manually. Once confirmed, the CA certificate will be used to validate subsequent communication with the API endpoint. Click Create provider to add and save the provider. The provider appears in the list of providers. Optional: Add access to the UI of the provider: On the Providers page, click the provider. The Provider details page opens. Click the Edit icon under External UI web link . Enter the link and click Save . Note If you do not enter a link, MTV attempts to calculate the correct link. If MTV succeeds, the hyperlink of the field points to the calculated link. If MTV does not succeed, the field remains empty. 10.2. Adding an OpenShift Virtualization destination provider You can use a Red Hat OpenShift Virtualization provider as both a source provider and destination provider. Specifically, the host cluster that is automatically added as an OpenShift Virtualization provider can be used as both a source provider and a destination provider. You can also add another OpenShift Virtualization destination provider to the Red Hat OpenShift web console in addition to the default OpenShift Virtualization destination provider, which is the cluster where you installed MTV. You can migrate VMs from the cluster that MTV is deployed on to another cluster, or from a remote cluster to the cluster that MTV is deployed on.
Prerequisites You must have an OpenShift Virtualization service account token with cluster-admin privileges (a sketch for creating one appears at the end of this chapter). Procedure In the Red Hat OpenShift web console, click Migration Providers for virtualization . Click Create Provider . Click OpenShift Virtualization . Specify the following fields: Provider resource name : Name of the source provider URL : URL of the endpoint of the API server Service account bearer token : Token for a service account with cluster-admin privileges If both URL and Service account bearer token are left blank, the local OpenShift cluster is used. Choose one of the following options for validating CA certificates: Use a custom CA certificate : Migrate after validating a custom CA certificate. Use the system CA certificate : Migrate after validating the system CA certificate. Skip certificate validation : Migrate without validating a CA certificate. To use a custom CA certificate, leave the Skip certificate validation switch toggled to the left, and either drag the CA certificate to the text box or browse for it and click Select . To use the system CA certificate, leave the Skip certificate validation switch toggled to the left, and leave the CA certificate text box empty. To skip certificate validation, toggle the Skip certificate validation switch to the right. Optional: Ask MTV to fetch a custom CA certificate from the provider's API endpoint URL. Click Fetch certificate from URL . The Verify certificate window opens. If the details are correct, select the I trust the authenticity of this certificate checkbox, and then click Confirm . If not, click Cancel , and then enter the correct certificate information manually. Once confirmed, the CA certificate will be used to validate subsequent communication with the API endpoint. Click Create provider to add and save the provider. The provider appears in the list of providers. 10.3. Selecting a migration network for an OpenShift Virtualization provider You can select a default migration network for an OpenShift Virtualization provider in the Red Hat OpenShift web console to improve performance. The default migration network is used to transfer disks to the namespaces in which it is configured. If you do not select a migration network, the default migration network is the pod network, which might not be optimal for disk transfer. Note You can override the default migration network of the provider by selecting a different network when you create a migration plan. Procedure In the Red Hat OpenShift web console, click Migration > Providers for virtualization . Click the OpenShift Virtualization provider whose migration network you want to change. When the Providers detail page opens: Click the Networks tab. Click Set default transfer network . Select a default transfer network from the list and click Save . 10.4. Creating a migration plan Use the Red Hat OpenShift web console to create a migration plan. Specify the source provider, the virtual machines (VMs) you want to migrate, and other plan details. Warning Do not include virtual machines with guest-initiated storage connections, such as Internet Small Computer Systems Interface (iSCSI) connections or Network File System (NFS) mounts. These require either additional planning before migration or reconfiguration after migration. This prevents concurrent disk access to the storage the guest points to. Important A plan cannot contain more than 500 VMs or 500 disks. Procedure In the Red Hat OpenShift web console, click Plans for virtualization and then click Create Plan .
The Create migration plan wizard opens to the Select source provider interface. Select the source provider of the VMs you want to migrate. The Select virtual machines interface opens. Select the VMs you want to migrate and click . The Create migration plan pane opens. It displays the source provider's name and suggestions for a target provider and namespace, a network map, and a storage map. Enter the Plan name . To change the Target provider , the Target namespace , or elements of the Network map or the Storage map , select an item from the relevant list. To add either a Network map or a Storage map , click the + sign and add a mapping. Click Create migration plan . MTV validates the migration plan, and the Plan details page opens, indicating whether the plan is ready for use or contains an error. The details of the plan are listed, and you can edit the items you filled in on the page. If you make any changes, MTV validates the plan again. Check the following items in the Settings section of the page: Transfer Network : The network used to transfer the VMs to OpenShift Virtualization. By default, this is the default transfer network of the provider. Verify that the transfer network is in the selected target namespace. To edit the transfer network, click the Edit icon, choose a different transfer network from the list in the window that opens, and click Save . You can configure an OpenShift network in the OpenShift web console by clicking Networking > NetworkAttachmentDefinitions . To learn more about the different types of networks OpenShift supports, see Additional Networks in OpenShift Container Platform . If you want to adjust the maximum transmission unit (MTU) of the OpenShift transfer network, you must also change the MTU of the VMware migration network. For more information, see Selecting a migration network for a VMware source provider . Target namespace : Destination namespace to be used by all the migrated VMs. By default, this is the current or active namespace. To edit the namespace, click the Edit icon, choose a different target namespace from the list in the window that opens, and click Save . If your plan is valid, you can do one of the following: Run the plan now by clicking Start migration . Run the plan later by selecting it on the Plans for virtualization page and following the procedure in Running a migration plan . 10.5. Running a migration plan You can run a migration plan and view its progress in the Red Hat OpenShift web console. Prerequisites A valid migration plan. Procedure In the Red Hat OpenShift web console, click Migration Plans for virtualization . The Plans list displays the source and target providers, the number of virtual machines (VMs) being migrated, the status, the date that the migration started, and the description of each plan. Click Start beside a migration plan to start the migration. Click Start in the confirmation window that opens. The plan's Status changes to Running , and the migration's progress is displayed. Warm migration only: The precopy stage starts. Click Cutover to complete the migration. Optional: Click the links in the migration's Status to see its overall status and the status of each VM: The link on the left indicates whether the migration failed, succeeded, or is ongoing. It also reports the number of VMs whose migration succeeded, failed, or was canceled. The link on the right opens the Virtual Machines tab of the Plan Details page.
For each VM, the tab displays the following data: The name of the VM The start and end times of the migration The amount of data copied A progress pipeline for the VM's migration Warning vMotion, including svMotion, and relocation must be disabled for VMs that are being imported to avoid data corruption. Optional: To view your migration's logs, either as it is running or after it is completed, perform the following actions: Click the Virtual Machines tab. Click the arrow ( > ) to the left of the virtual machine whose migration progress you want to check. The VM's details are displayed. In the Pods section, in the Pod links column, click the Logs link. The Logs tab opens. Note Logs are not always available. The following are common reasons for logs not being available: The migration is from OpenShift Virtualization to OpenShift Virtualization. In this case, virt-v2v is not involved, so no pod is required. No pod was created. The pod was deleted. The migration failed before running the pod. To see the raw logs, click the Raw link. To download the logs, click the Download link. 10.6. Migration plan options On the Plans for virtualization page of the Red Hat OpenShift web console, you can click the Options menu beside a migration plan to access the following options: Edit Plan : Edit the details of a migration plan. If the plan is running or has completed successfully, you cannot edit the following options: All properties on the Settings section of the Plan details page. For example, warm or cold migration, target namespace, and preserved static IPs. The plan's mapping on the Mappings tab. The hooks listed on the Hooks tab. Start migration : Active only if relevant. Restart migration : Restart a migration that was interrupted. Before choosing this option, make sure there are no error messages. If there are, you need to edit the plan. Cutover : Warm migrations only. Active only if relevant. Clicking Cutover opens the Cutover window, which supports the following options: Set cutover : Set the date and time for a cutover. Remove cutover : Cancel a scheduled cutover. Active only if relevant. Duplicate Plan : Create a new migration plan with the same virtual machines (VMs), parameters, mappings, and hooks as an existing plan. You can use this feature for the following tasks: Migrate VMs to a different namespace. Edit an archived migration plan. Edit a migration plan with a different status, for example, failed, canceled, running, critical, or ready. Archive Plan : Delete the logs, history, and metadata of a migration plan. The plan cannot be edited or restarted. It can only be viewed, duplicated, or deleted. Note Archive Plan is irreversible. However, you can duplicate an archived plan. Delete Plan : Permanently remove a migration plan. You cannot delete a running migration plan. Note Delete Plan is irreversible. Deleting a migration plan does not remove temporary resources. To remove temporary resources, archive the plan first before deleting it. 10.7. Canceling a migration You can cancel the migration of some or all virtual machines (VMs) while a migration plan is in progress by using the Red Hat OpenShift web console. Procedure In the Red Hat OpenShift web console, click Plans for virtualization . Click the name of a running migration plan to view the migration details. Select one or more VMs and click Cancel . Click Yes, cancel to confirm the cancellation. In the Migration details by VM list, the status of the canceled VMs is Canceled . 
Virtual machines that have already been migrated, and those that have not yet been migrated, are not affected by the cancellation. You can restart a canceled migration by clicking Restart beside the migration plan on the Migration plans page.
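If you prefer the command line for some of these tasks, for example preparing the service account token for a source provider or checking the state of a migration plan, the following is a minimal sketch. The service account name mtv-source-sa and the openshift-mtv namespace are assumptions used only for illustration, and the oc create token command requires a recent oc client; adapt the names to your environment.
Create a service account with cluster-admin privileges for the source provider:
oc create serviceaccount mtv-source-sa -n openshift-mtv
oc adm policy add-cluster-role-to-user cluster-admin -z mtv-source-sa -n openshift-mtv
Request a bearer token for the service account and paste it into the Service account bearer token field when you create the provider:
oc create token mtv-source-sa -n openshift-mtv --duration=24h
List migration plans and inspect a specific plan. The plans.forklift.konveyor.io resource name reflects the Forklift-based API that MTV uses; verify the exact resource name in your installation with oc api-resources:
oc get plans.forklift.konveyor.io -n openshift-mtv
oc describe plans.forklift.konveyor.io <plan_name> -n openshift-mtv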
https://docs.redhat.com/en/documentation/migration_toolkit_for_virtualization/2.7/html/installing_and_using_the_migration_toolkit_for_virtualization/migrating-virt_cnv
Chapter 32. Using Advanced Error Reporting
Chapter 32. Using Advanced Error Reporting When you use Advanced Error Reporting ( AER ), you receive notifications of error events for Peripheral Component Interconnect Express ( PCIe ) devices. RHEL enables this kernel feature by default and collects the reported errors in the kernel logs. If you use the rasdaemon program, these errors are parsed and stored in its database. 32.1. Overview of AER Advanced Error Reporting ( AER ) is a kernel feature that provides enhanced error reporting for Peripheral Component Interconnect Express ( PCIe ) devices. The AER kernel driver attaches to root ports that support the PCIe AER capability in order to: Gather comprehensive error information Report errors to users Perform error recovery actions When AER captures an error, it sends an error message to the console. For a repairable (correctable) error, the console output is a warning . Example 32.1. Example AER output 32.2. Collecting and displaying AER messages To collect and display AER messages, use the rasdaemon program. Procedure Install the rasdaemon package. # yum install rasdaemon Enable and start the rasdaemon service. # systemctl enable --now rasdaemon Created symlink /etc/systemd/system/multi-user.target.wants/rasdaemon.service → /usr/lib/systemd/system/rasdaemon.service. Issue the ras-mc-ctl command. # ras-mc-ctl --summary # ras-mc-ctl --errors The command displays a summary of the logged errors (the --summary option) or displays the errors stored in the error database (the --errors option). Additional resources The rasdaemon(8) manual page The ras-mc-ctl(8) manual page
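In addition to the ras-mc-ctl reports above, you can check for AER events directly in the kernel log, with or without rasdaemon. The following commands are a minimal sketch; the --grep option assumes a reasonably recent systemd journal. Corrected errors appear with severity=Corrected, as in the example output above.
# dmesg | grep -i 'AER'
# journalctl -k --grep 'AER'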
[ "Feb 5 15:41:33 hostname kernel: pcieport 10003:00:00.0: AER: Corrected error received: id=ae00 Feb 5 15:41:33 hostname kernel: pcieport 10003:00:00.0: AER: Multiple Corrected error received: id=ae00 Feb 5 15:41:33 hostname kernel: pcieport 10003:00:00.0: PCIe Bus Error: severity=Corrected, type=Data Link Layer, id=0000(Receiver ID) Feb 5 15:41:33 hostname kernel: pcieport 10003:00:00.0: device [8086:2030] error status/mask=000000c0/00002000 Feb 5 15:41:33 hostname kernel: pcieport 10003:00:00.0: [ 6] Bad TLP Feb 5 15:41:33 hostname kernel: pcieport 10003:00:00.0: [ 7] Bad DLLP Feb 5 15:41:33 hostname kernel: pcieport 10003:00:00.0: AER: Multiple Corrected error received: id=ae00 Feb 5 15:41:33 hostname kernel: pcieport 10003:00:00.0: PCIe Bus Error: severity=Corrected, type=Data Link Layer, id=0000(Receiver ID) Feb 5 15:41:33 hostname kernel: pcieport 10003:00:00.0: device [8086:2030] error status/mask=00000040/00002000", "yum install rasdaemon", "systemctl enable --now rasdaemon Created symlink /etc/systemd/system/multi-user.target.wants/rasdaemon.service /usr/lib/systemd/system/rasdaemon.service.", "ras-mc-ctl --summary ras-mc-ctl --errors" ]
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/8/html/managing_monitoring_and_updating_the_kernel/assembly_advanced-error-reporting_managing-monitoring-and-updating-the-kernel
3.2. CPU Performance Options
3.2. CPU Performance Options Several CPU-related options are available to your guest virtual machines. Configured correctly, these options can have a large impact on performance. The following image shows the CPU options available to your guests. The remainder of this section shows and explains the impact of these options. Figure 3.3. CPU Performance Options 3.2.1. Option: Available CPUs Use this option to adjust the number of virtual CPUs (vCPUs) available to the guest. If you allocate more than is available on the host (known as overcommitting ), a warning is displayed, as shown in the following image: Figure 3.4. CPU overcommit CPUs are overcommitted when the sum of vCPUs for all guests on the system, whether one guest or many, is greater than the number of host CPUs on the system. Important As with memory overcommitting, CPU overcommitting can have a negative impact on performance, for example in situations with a heavy or unpredictable guest workload. See the Virtualization Deployment and Administration Guide for more details on overcommitting. 3.2.2. Option: CPU Configuration Use this option to select the CPU configuration type, based on the intended CPU model. Click the Copy host CPU configuration check box to detect and apply the physical host's CPU model and configuration, or expand the list to see available options. Once you select a CPU configuration, its available CPU features/instructions are displayed and can be individually enabled/disabled in the CPU Features list. Figure 3.5. CPU Configuration Options Note Copying the host CPU configuration is recommended over manual configuration. Note Alternatively, run the virsh capabilities command on your host machine to view the virtualization capabilities of your system, including CPU types and NUMA capabilities. 3.2.3. Option: CPU Topology Use this option to apply a particular CPU topology (Sockets, Cores, Threads) to the virtual CPUs for your guest virtual machine. Figure 3.6. CPU Topology Options Note Although your environment may dictate other requirements, selecting the intended number of sockets, but with only a single core and a single thread per socket, usually gives the best performance results.
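The same information and settings are also available from the command line with virsh, which can be useful for scripting or for hosts without a graphical console. The following is a minimal sketch; guest1 is a placeholder domain name.
Check the vCPU count currently assigned to a guest:
# virsh vcpucount guest1
View the CPU model and topology in the guest's XML configuration:
# virsh dumpxml guest1 | grep -A3 '<cpu'
View the host's virtualization capabilities, including CPU model and NUMA layout:
# virsh capabilities
To apply a topology, run virsh edit guest1 and set the vCPU count together with a matching <topology> element, for example four vCPUs presented as one socket with four cores and one thread per core:
<vcpu placement='static'>4</vcpu>
<cpu mode='host-model'>
  <topology sockets='1' cores='4' threads='1'/>
</cpu>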
https://docs.redhat.com/en/documentation/Red_Hat_Enterprise_Linux/7/html/virtualization_tuning_and_optimization_guide/sect-virtualization_tuning_optimization_guide-virt_manager-cpu_options
Chapter 11. Managing Ceph Object Gateway using the dashboard
Chapter 11. Managing Ceph Object Gateway using the dashboard As a storage administrator, the Ceph Object Gateway functions of the dashboard allow you to manage and monitor the Ceph Object Gateway. You can also create the Ceph Object Gateway services with Secure Sockets Layer (SSL) using the dashboard. For example, monitoring functions allow you to view details about a gateway daemon such as its zone name, or performance graphs of GET and PUT rates. Management functions allow you to view, create, and edit both users and buckets. Ceph Object Gateway functions are divided between user functions and bucket functions. 11.1. Manually adding Ceph object gateway login credentials to the dashboard The Red Hat Ceph Storage Dashboard can manage the Ceph Object Gateway, also known as the RADOS Gateway, or RGW. When Ceph Object Gateway is deployed with cephadm , the Ceph Object Gateway credentials used by the dashboard is automatically configured. You can also manually force the Ceph object gateway credentials to the Ceph dashboard using the command-line interface. Prerequisites A running Red Hat Ceph Storage cluster. Dashboard is installed. Ceph Object Gateway is installed. Procedure Log into the Cephadm shell: Example Set up the credentials manually: Example This creates a Ceph Object Gateway user with UID dashboard for each realm in the system. Optional: If you have configured a custom admin resource in your Ceph Object Gateway admin API, you have to also set the the admin resource: Syntax Example Optional: If you are using HTTPS with a self-signed certificate, disable certificate verification in the dashboard to avoid refused connections. Refused connections can happen when the certificate is signed by an unknown Certificate Authority, or if the host name used does not match the host name in the certificate. Syntax Example Optional: If the Object Gateway takes too long to process requests and the dashboard runs into timeouts, you can set the timeout value: Syntax The default value of 45 seconds. Example 11.2. Creating the Ceph Object Gateway services with SSL using the dashboard After installing a Red Hat Ceph Storage cluster, you can create the Ceph Object Gateway service with SSL using two methods: Using the command-line interface. Using the dashboard. Prerequisites A running Red Hat Ceph Storage cluster. Dashboard is installed. SSL key from Certificate Authority (CA). Note Obtain the SSL certificate from a CA that matches the hostname of the gateway host. Red Hat recommends obtaining a certificate from a CA that has subject alternate name fields and a wildcard for use with S3-style subdomains. Procedure From the dashboard navigation, go to Administration->Services . Click Create . Fill in the Create Service form. Select rgw from the Type service list. Enter the ID that is used in service_id . Select SSL . Click Choose File and upload the SSL certificate .pem format. Figure 11.1. Creating Ceph Object Gateway service Click Create Service . Check the Ceph Object Gateway service is up and running. Additional Resources See the Configuring SSL for Beast section in the Red Hat Ceph Storage Object Gateway Guide . 11.3. Configuring high availability for the Ceph Object Gateway on the dashboard The ingress service provides a highly available endpoint for the Ceph Object Gateway. You can create and configure the ingress service using the Ceph Dashboard. Prerequisites A running Red Hat Ceph Storage cluster. A minimum of two Ceph Object Gateway daemons running on different hosts. Dashboard is installed. 
A running rgw service. Procedure From the dashboard navigation, go to Administration->Services . Click Create . In the Create Service form, select ingress service. Select backend service and edit the required parameters. Figure 11.2. Creating ingress service Click Create Service . A notification displays that the ingress service was created successfully. Additional Resources See High availability for the Ceph Object Gateway for more information about the ingress service. 11.4. Managing Ceph Object Gateway users on the dashboard As a storage administrator, the Red Hat Ceph Storage Dashboard allows you to view and manage Ceph Object Gateway users. Prerequisites A running Red Hat Ceph Storage cluster. Dashboard is installed. The Ceph Object Gateway is installed. Object gateway login credentials are added to the dashboard. 11.4.1. Creating Ceph object gateway users on the dashboard You can create Ceph object gateway users on the Red Hat Ceph Storage once the credentials are set-up using the CLI. Prerequisites A running Red Hat Ceph Storage cluster. Dashboard is installed. The Ceph Object Gateway is installed. Object gateway login credentials are added to the dashboard. Procedure From the dashboard navigation, go to Object->Users . On the Users tab, click Create . Create User form, set the following parameters: Enter the User ID and Full name . If required, edit the maximum number of buckets. Optional: Fill in an Email address Optional: Select if the user is Suspended or a System user . Optional: In the S3 key section, set a custom access key and secret key by clearing the Auto-generate key selection. Optional: In the User quota section, select if the user quota is Enabled , Unlimited size , or has Unlimited objects . If there is a limited size enter the maximum size. If there are limited objects, enter the maximum objects. Optional: In the Bucket quota section, select if the bucket quota is Enabled , Unlimited size , or has Unlimited objects . If there is a limited size enter the maximum size. If there are limited objects, enter the maximum objects. Click Create User . Figure 11.3. Create Ceph object gateway user A notification displays that the user was created successfully. Additional Resources See the Manually adding Ceph object gateway login credentials to the dashboard section in the Red Hat Ceph Storage Dashboard guide for more information. See the Red Hat Ceph Storage Object Gateway Guide for more information. 11.4.2. Adding roles to the Ceph Object Gateway users on the dashboard You can add a role to a specific Ceph object gateway user on the Red Hat Ceph Storage dashboard. Prerequisites Ceph Object Gateway is installed. Ceph Object gateway login credentials are added to the dashboard. Ceph Object gateway user is created. Procedure Log in to the Dashboard. On the navigation bar, click Object Gateway . Click Roles . Select the user by clicking the relevant row. From Edit drop-down menu, select Create Role . In the Create Role window, configure Role name , Path , and Assume Role Policy Document . Figure 11.4. Create Ceph object gateway subuser Click Create Role . 11.4.3. Creating Ceph object gateway subusers on the dashboard A subuser is associated with a user of the S3 interface. You can create a sub user for a specific Ceph object gateway user on the Red Hat Ceph Storage dashboard. Prerequisites A running Red Hat Ceph Storage cluster. Dashboard is installed. The Ceph Object Gateway is installed. Object gateway login credentials are added to the dashboard. 
Object gateway user is created. Procedure From the dashboard navigation, go to Object->Users . On the Uers tab, select a user and click Edit . In the Edit User form, click Create Subuser . In the Create Subuser dialog, enter the username and select the appropriate permissions. Select the Auto-generate secret box and then click Create Subuser . Figure 11.5. Create Ceph object gateway subuser Note By selecting Auto-generate-secret , the secret key for Object Gateway is generated automatically. In the Edit User form, click Edit user . A notification displays that the user was updated successfully. 11.4.4. Editing Ceph object gateway users on the dashboard You can edit Ceph object gateway users on the Red Hat Ceph Storage once the credentials are set-up using the CLI. Prerequisites A running Red Hat Ceph Storage cluster. Dashboard is installed. The Ceph Object Gateway is installed. Object gateway login credentials are added to the dashboard. A Ceph object gateway user is created. Procedure From the dashboard navigation, go to Object->Users . On the Users tab, select the user row and click Edit . In the Edit User form, edit the required parameters and click Edit User . Figure 11.6. Edit Ceph object gateway user A notification displays that the user was updated successfully. Additional Resources See the Manually adding Ceph object gateway login credentials to the dashboard section in the Red Hat Ceph Storage Dashboard guide for more information. See the Red Hat Ceph Storage Object Gateway Guide for more information. 11.4.5. Deleting Ceph Object Gateway users on the dashboard You can delete Ceph object gateway users on the Red Hat Ceph Storage once the credentials are set-up using the CLI. Prerequisites A running Red Hat Ceph Storage cluster. Dashboard is installed. The Ceph Object Gateway is installed. Object gateway login credentials are added to the dashboard. A Ceph object gateway user is created. Procedure From the dashboard navigation, go to Object->Users . Select the Username to delete, and click Delete from the action drop-down. In the Delete user notification, select Yes, I am sure and click Delete User . The user is removed from the Users table. Figure 11.7. Delete Ceph object gateway user Additional Resources See the Manually adding Ceph object gateway login credentials to the dashboard section in the Red Hat Ceph Storage Dashboard guide for more information. See the Red Hat Ceph Storage Object Gateway Guide for more information. 11.5. Managing Ceph Object Gateway buckets on the dashboard As a storage administrator, the Red Hat Ceph Storage Dashboard allows you to view and manage Ceph Object Gateway buckets. Prerequisites A running Red Hat Ceph Storage cluster. Dashboard is installed. The Ceph Object Gateway is installed. At least one Ceph Object Gateway user is created. Object gateway login credentials are added to the dashboard. 11.5.1. Creating Ceph object gateway buckets on the dashboard You can create Ceph object gateway buckets on the Red Hat Ceph Storage once the credentials are set-up using the CLI. Prerequisites A running Red Hat Ceph Storage cluster. Dashboard is installed. The Ceph Object Gateway is installed. Object gateway login credentials are added to the dashboard. Object gateway user is created and not suspended. Procedure From the dashboard navigation, go to Object->Buckets . Click Create . The Create Bucket form displays. Enter a Name for the bucket. Select an Owner . The owner is a user that is not suspended. Select a Placement target . 
Important A bucket's placement target cannot be changed after creation. Optional: In the Locking section, select Enabled to enable locking for the bucket objects. Important Locking can only be enabled while creating a bucket and cannot be changed after creation. Select the Mode , either Compliance or Governance . In the Days field, select the default retention period that is applied to new objects placed in this bucket. Optional: In the Security section, select Security to encrypt objects in the bucket. Set the configuration values for SSE-S3. Click the Encryption information icon and then Click here . Note When using SSE-S3 encryption type, Ceph manages the encryption keys that are stored in the vault by the user. In the Update RGW Encryption Configurations dialog, ensure that SSE-S3 is selected as the Encryption Type . Fill the other required information. Click Submit . Figure 11.8. Encrypt objects in the bucket In the Tags section, click Add to add bucket tags. These tags are equivalent to the S3 PutBucketTagging. Enter the tag Key and tag Value to categorize your storage buckets. Set the bucket policies in the Policies section. Enter the Bucket policy . Use the Policy generator or Policy examples buttons to help create the bucket policies, as needed. Enter or modify the policy in JSON format. Use the following links from within the form to help create your bucket policy. These links open a new tab in your browser. Policy generator is an external tool from AWS to generate a bucket policy. For more information, see link::https://awspolicygen.s3.amazonaws.com/policygen.html[AWS Policy Generator]. Note You can use the policy generator with the S3 Bucket Policy type as a guideline for building your Ceph Object Gateway bucket policies. Policy examples takes you to AWS documentation with examples of bucket policies. For more information about managing bucket policies through the dashboard, see Managing Ceph Object Gateway bucket policies on the dashboard . Set the Access Control Lists (ACL) grantee and permission information. Table 11.1. ACL user options Permission Bucket Object READ Grantee can list the objects in the bucket. Grantee can read the object. WRITE Grantee can write or delete objects in the bucket. N/A FULL_CONTROL Grantee has full permissions for object in the bucket. Grantee can read or write to the object ACL. Click Create bucket . A notification displays that the bucket was created successfully. 11.5.2. Editing Ceph object gateway buckets on the dashboard You can edit Ceph object gateway buckets on the Red Hat Ceph Storage once the credentials are set-up using the CLI. Prerequisites A running Red Hat Ceph Storage cluster. Dashboard is installed. The Ceph Object Gateway is installed. Object gateway login credentials are added to the dashboard. Object gateway user is created and not suspended. A Ceph Object Gateway bucket created. Procedure From the dashboard navigation, go to Object>Buckets . On the navigation bar, click Object Gateway . Select the bucket row that needs to be updated, and click Edit . The Edit Bucket displays. Optional: Enable Versioning if you want to enable versioning state for all the objects in an existing bucket. To enable versioning, you must be the owner of the bucket. If Locking is enabled during bucket creation, you cannot disable the versioning. All objects added to the bucket will receive a unique version ID. If the versioning state has not been set on a bucket, then the bucket will not have a versioning state. 
Optional: Select (Delete enabled) for Multi-Factor Authentication. Multi-Factor Authentication (MFA) ensures that users need to use a one-time password (OTP) when removing objects on certain buckets. Enter a value for Token Serial Number and Token PIN . Note The buckets must be configured with versioning and MFA enabled which can be done through the S3 API. Optional: As needed, update the Tags and Policies . Updating the Policies includes updating the Bucket policy and Access Control Lists (ACL) grantee and permission information. For more information, see Creating Ceph object gateway buckets on the dashboard . Click Edit Bucket to save the changes. A notification displays that the bucket was updated successfully. 11.5.3. Deleting Ceph Object Gateway buckets on the dashboard You can delete Ceph object gateway buckets on the Red Hat Ceph Storage once the credentials are set-up using the CLI. Prerequisites A running Red Hat Ceph Storage cluster. Dashboard is installed. The Ceph Object Gateway is installed. Object Gateway login credentials are added to the dashboard. Object Gateway user is created and not suspended. A Ceph Object Gateway bucket created. Procedure From the dashboard navigation, go to Object->Buckets . Select the bucket to be deleted, and click Delete from the action drop-down. In the Delete Bucket notification, select Yes, I am sure and click Delete bucket . Figure 11.9. Delete Ceph Object Gateway bucket 11.6. Monitoring multi-site object gateway configuration on the Ceph dashboard The Red Hat Ceph Storage dashboard supports monitoring the users and buckets of one zone in another zone in a multi-site object gateway configuration. For example, if the users and buckets are created in a zone in the primary site, you can monitor those users and buckets in the secondary zone in the secondary site. Prerequisites At least one running Red Hat Ceph Storage cluster deployed on both the sites. Dashboard is installed. The multi-site object gateway is configured on the primary and secondary sites. Object gateway login credentials of the primary and secondary sites are added to the dashboard. Object gateway users are created on the primary site. Object gateway buckets are created on the primary site. Procedure From the dashboard navigation of the secondary site, go to Object->Buckets . View the Object Gateway buckets on the secondary landing page that were created for the Object Gateway users on the primary site. Figure 11.10. Multi-site Object Gateway monitoring Additional Resources For more information on configuring multi-site, see the Multi-site configuration and administration section of the Red Hat Ceph Storage Object Gateway guide. For more information on adding object gateway login credentials to the dashboard, see the Manually adding Ceph Object Gateway login credentials to the dashboard section in the Red Hat Ceph Storage Dashboard guide. For more information on creating object gateway users on the dashboard, see the Creating Ceph Object Gateway users on the dashboard section in the Red Hat Ceph Storage Dashboard guide. For more information on creating object gateway buckets on the dashboard, see the Creating Ceph Object Gateway buckets on the dashboard section in the Red Hat Ceph Storage Dashboard guide. 11.7. Viewing Ceph object gateway per-user and per-bucket performance counters on the dashboard You can view the Ceph Object Gateway performance counters per user per bucket in the Grafana dashboard. Prerequisites A running Red Hat Ceph Storage cluster. Dashboard is installed. 
Grafana is installed. The Ceph Object Gateway is installed. Object gateway login credentials are added to the dashboard. Object gateway user is created and not suspended. Configure below parameters to Ceph Object Gateway service: Procedure Log in to the Grafana URL. Syntax Example Go to the 'Dashboard' tab and search for 'RGW S3 Analytics'. To view per-bucket Ceph Object gateway operations, select the 'Bucket' panel: To view user-level Ceph Object gateway operations, select the 'User' panel: Note The output of per-bucket/per-user get operation count command increases by two for each 'get' operation run from client: s3cmd. This is a known issue. 11.8. Managing Ceph Object Gateway bucket policies on the dashboard As a storage administrator, the Red Hat Ceph Storage Dashboard allows you to view and manage Ceph Object Gateway bucket policies. Prerequisites A running Red Hat Ceph Storage cluster. Dashboard is installed. The Ceph Object Gateway is installed. At least one Ceph object gateway user is created. Ceph Object Gateway login credentials are added to the dashboard. At least one Ceph Object Gateway bucket. For more information about creating a bucket, see Creating Ceph Object Gateway buckets on the dashboard . 11.8.1. Creating and editing Ceph Object Gateway bucket policies on the dashboard You can create and edit Ceph Object Gateway bucket policies on the Red Hat Ceph Storage dashboard. Prerequisites A running Red Hat Ceph Storage cluster. Dashboard is installed. The Ceph Object Gateway is installed. At least one Ceph object gateway user is created. Ceph Object Gateway login credentials are added to the dashboard. At least one Ceph Object Gateway bucket. For more information about creating a bucket, see Creating Ceph Object Gateway buckets on the dashboard . Procedure From the dashboard, go to Object Buckets . Create or modify a bucket policy for an existing bucket. Note To create a bucket policy during bucket creation, click Create and fill in the bucket policy information in the Policies section of the Create Bucket form. Select the bucket for which the bucket policy will be created or modified, and then click Edit . In the Create Bucket form, go to Policies . Enter or modify the policy in JSON format. Use the following links from within the form to help create your bucket policy. These links open a new tab in your browser. Policy generator is an external tool from AWS to generate a bucket policy. For more information, see AWS Policy Generator . Note You can use the policy generator with the S3 Bucket Policy type as a guideline for building your Ceph Object Gateway bucket policies. Policy examples takes you to AWS documentation with examples of bucket policies. To save the bucket policy, click Edit Bucket . Note When creating a bucket policy during an initial bucket creation, click Create Bucket . When the bucket policy is saved, the Updated Object Gateway bucket `bucketname` notification is displayed. 11.8.2. Deleting Ceph Object Gateway bucket policies on the dashboard You can delete Ceph Object Gateway bucket policies on the Red Hat Ceph Storage dashboard. Prerequisites A running Red Hat Ceph Storage cluster. Dashboard is installed. The Ceph Object Gateway is installed. At least one Ceph object gateway user is created. Ceph Object Gateway login credentials are added to the dashboard. At least one Ceph Object Gateway bucket. For more information about creating a bucket, see Creating Ceph Object Gateway buckets on the dashboard . Procedure From the dashboard, go to Object Buckets . 
Select the bucket for which the bucket policy will be created or modified, and then click Edit . In the Edit Bucket form, go to Policies . Click Clear . To complete the bucket policy deletion, click Edit Bucket . When the bucket policy is deleted, the Updated Object Gateway bucket `bucketname` notification is displayed. 11.9. Managing S3 bucket lifecycle policies on the dashboard As a storage administrator, the Red Hat Ceph Storage Dashboard allows you to view and manage S3 bucket lifecycle policies on the dashboard. Prerequisites A running Red Hat Ceph Storage cluster. At least one Ceph object gateway user is created. Ceph Object Gateway login credentials are added to the dashboard. At least one Ceph Object Gateway bucket. For more information about creating a bucket, see Creating Ceph Object Gateway buckets on the dashboard . 11.9.1. Applying and viewing S3 bucket lifecycle policies on the dashboard You can apply and manage S3 bucket lifecycle policies on the Red Hat Ceph Storage dashboard. Procedure Bucket lifecycle policiy cannot be applied during the creation of the bucket. They can be applied only after a bucket is created. From the dashboard, go to Object Buckets . Select the bucket for which the lifecycle policy needs to be applied and click Edit . In the Edit Bucket form, go to Policies and apply the lifecycle rule in the Lifecycle field in JSON format. To save the bucket lifecycle policy, click Edit Bucket . Figure 11.11. Apply bucket lifecycle policy After the bucket lifecycle policy is applied, it can viewed in the bucket listing screen by expanding the relevant bucket entry. Figure 11.12. View bucket lifecycle policy 11.9.2. Deleting S3 bucket lifecycle policies on the dashboard You can delete S3 bucket lifecycle policies on the Red Hat Ceph Storage dashboard. Procedure From the dashboard, go to Object Buckets . Select the bucket for which the bucket lifecycle policy needs to be deleted, and click Edit . In the Edit Bucket form, go to Policies . Click Clear . To complete the bucket lifecycle policy deletion, click Edit Bucket . 11.10. Management of buckets of a multi-site object configuration on the Ceph dashboard As a storage administrator, you can edit buckets of one zone in another zone on the Red Hat Ceph Storage Dashboard. However, you can delete buckets of secondary sites in the primary site. You cannot delete the buckets of master zones of primary sites in other sites. For example, If the buckets are created in a zone in the secondary site, you can edit and delete those buckets in the master zone in the primary site. Prerequisites At least one running Red Hat Ceph Storage cluster deployed on both the sites. Dashboard is installed. The multi-site object gateway is configured on the primary and secondary sites. Object gateway login credentials of the primary and secondary sites are added to the dashboard. Object gateway users are created on the primary site. Object gateway buckets are created on the primary site. At least rgw-manager level of access on the Ceph dashboard. 11.10.1. Monitoring buckets of a multi-site object Monitor the multi-site sync status of a bucket on the dashboard. You can view the source zones and sync status from Object->Multi-site on the Ceph Dashboard. The multi-site sync status is divided into two sections: Primary Source Zone Displays the default realm, zonegroup, and the zone the Ceph Object Gateway is connected to. Source Zones View both the metadata sync status and data sync information progress. 
When you click the status, a breakdown of the shard syncing is displayed. The sync status shows the Last Synced time stamp with the relative time of the last sync occurrence in relation to the current time. When the sync is complete, this shows as Up to Date . When a sync is not caught up the status shows as Syncing . However, the Last sync shows the number of days the sync is not caught up. By clicking Syncing , it displays the details about shards which are not synced. 11.10.2. Editing buckets of a multi-site Object Gateway configuration on the Ceph Dashboard You can edit and update the details of the buckets of one zone in another zone on the Red Hat Ceph Storage Dashboard in a multi-site object gateway configuration. You can edit the owner, versioning, multi-factor authentication and locking features of the buckets with this feature of the dashboard. Prerequisites At least one running Red Hat Ceph Storage cluster deployed on both the sites. Dashboard is installed. The multi-site object gateway is configured on the primary and secondary sites. Object gateway login credentials of the primary and secondary sites are added to the dashboard. Object gateway users are created on the primary site. Object gateway buckets are created on the primary site. At least rgw-manager level of access on the Ceph dashboard. Procedure From the dashboard navigation of the secondary site, go to Object->Buckets . The Object Gateway buckets from the primary site are displayed. Select the bucket that you want to edit, and click Edit from the action drop-down. In the Edit Bucket form, edit the required prameters, and click Edit Bucket . A notification is displayed that the bucket is updated successfully. Figure 11.13. Edit buckets in a multi-site Additional Resources For more information on adding object gateway login credentials to the dashboard, see the Manually adding Ceph Object Gateway login credentials to the Ceph dashboard section in the Red Hat Ceph Storage Dashboard guide. For more information on creating object gateway users on the dashboard, see the Creating Ceph Object Gateway users on the Ceph dashboard section in the Red Hat Ceph Storage Dashboard guide. For more information on creating object gateway buckets on the dashboard, see the Creating Ceph Object Gateway buckets on the Ceph dashboard section in the Red Hat Ceph Storage Dashboard guide. For more information on system roles, see the Managing roles on the Ceph dashboard section in the Red Hat Ceph Storage Dashboard Guide . 11.10.3. Deleting buckets of a multi-site Object Gateway configuration on the Ceph Dashboard You can delete buckets of secondary sites in primary sites on the Red Hat Ceph Storage Dashboard in a multi-site Object Gateway configuration. Important Red Hat does not recommend to delete buckets of primary site from secondary sites. Prerequisites At least one running Red Hat Ceph Storage cluster deployed on both the sites. Dashboard is installed. The multi-site object gateway is configured on the primary and secondary sites. Object Gateway login credentials of the primary and secondary sites are added to the dashboard. Object Gateway users are created on the primary site. Object Gateway buckets are created on the primary site. At least rgw-manager level of access on the Ceph dashboard. Procedure From the dashboard navigation of the primary site, go to Object->Buckets . Select the bucket of the secondary site to be deleted, and click Delete from the action drop-down. 
In the Delete Bucket notification, select Yes, I am sure and click Delete bucket . The bucket is deleted from the Buckets table. Additional Resources For more information on configuring multi-site, see the Multi-site configuration and administration section of the Red Hat Ceph Storage Object Gateway guide. For more information on adding object gateway login credentials to the dashboard, see the Manually adding object gateway login credentials to the Ceph dashboard section in the Red Hat Ceph Storage Dashboard guide. For more information on creating object gateway users on the dashboard, see the Creating object gateway users on the Ceph dashboard section in the Red Hat Ceph Storage Dashboard guide. For more information on creating object gateway buckets on the dashboard, see the Creating object gateway buckets on the Ceph dashboard section in the Red Hat Ceph Storage Dashboard guide. For more information on system roles, see the System roles on the Ceph dashboard section in the Red Hat Ceph Storage Dashboard Guide . 11.11. Configuring a multi-site object gateway on the Ceph dashboard You can configure Ceph Object Gateway multi-site on the Red Hat Ceph Storage Dashboard. Prerequisites A running Red Hat Ceph Storage cluster deployed on both the sites. At least one Ceph Object Gateway service installed at both the sites. Procedure Enable the Ceph Object Gateway module for import/export on both the the primary and secondary sites. From the dashboard navigation of the secondary site, go to Object->Multi-site . In the In order to access the import/export feature, the rgw module must be enabled note, click Enable . On the primary site dashboard, create a default realm, zonegroup, and zone. Click Create Realm . In the Create Realm form, provide a realm name, and select Default . Click Create Realm . Click Create Zone Group from the action drop-down. In the Create Zone Group form, provide a zone group name, the Ceph Object Gateway endpoints, and select Default . Click Create Zone Group . Click Create Zone from the action drop-down. In the Create Zone form, provide a Zone Name , select Default , and provide the Ceph Object Gateway endpoints of the primary site. For the user, provide the access and secret key of the user with system privileges. Note While creating a zone, Red Hat recommends to give access key and secret key of the dashboard default user, dashboard . Click Create Zone . A warning is displayed to restart the Ceph Object Gateway service to complete the zone creation. Restart the Ceph Object Gateway service. From the dashboard navigation of the secondary site, go to Administration->Services . Select the Ceph Object Gateway service row and expand the row. From the Daemons tab, select the hostname. Click Restart from the action drop-down. From the dashboard navigataion, in Object->Overview you get an error that "The Object Gateway Service is not configured". This bug is a known issue. See BZ#2231072 . As a workaround, set the Ceph Object Gateway credentials on the command-line interface. Syntax Go to Object->Overview to verify that you are able to access the Ceph Object Gateway on the dashboard. Create a replication user on the primary site. You can use the following two options: Create user using the CLI: Example Create user from the dashboard and modify the user from the CLI: Example From the dashboard navigation, go to Object->Users . Expand the user row and from Keys , click Show . Use the Copy to Clipboard to copy the access and secret keys. These will be used in a later step. 
From the primary site dashboard, go to Object->Multi-site . From the Topology Viewer , select the zone and click the Edit icon. From the Edit Zone form, paste the access key in the S3 access key field and the secret key in the S3 secret key field. Use the keys that were copied previously. Click Edit Zone . Click Export . From the Export Multi-site Realm Token dialog, copy the token. From the secondary site, go to Object->Multi-site . Import the token from the primary zone, by clicking Import . In the Import Multi-site Token dialog, in the Zone section, paste the token that was copied earlier, and provide a secondary zone name. In the Service section, select the placement and the port where the new Ceph Object Gateway service is going to be created. Click Import . A warning is displayed to restart the Ceph Object Gateway service. Restart the Ceph Object Gateway service. From the dashboard navigation of the secondary site, go to Administration->Services . Select the Ceph Object Gateway service row and expand the row. From the Daemons tab, select the hostname. Click Restart from the action drop-down. Wait until the users are synced to the secondary site. Verify that the sync is complete using the following commands: Syntax Example In Object->Overview you get an error that "The Object Gateway Service is not configured". This bug is a known issue. See BZ#2231072 . As a workaround, set the Ceph Object Gateway credentials on the command-line interface. Syntax Go to Object->Overview to verify that you are able to access the Ceph Object Gateway on the dashboard. On the primary site, Object->Overview , in the Multi-Site Sync Status section, an error is displayed because on the secondary zone you can see that the endpoints and the hostname are not the IP address. This bug is a known issue while configuring multi-site. See BZ#2242994 . As a workaround, from the secondary site dashboard, go to Object->Multi-site . Select the secondary zone and click the Edit icon. Edit the endpoints to reflect the IP address. Click Edit Zone . On the primary site and secondary site dashboards, from Object->Overview , in the Multi-Site Sync Status section, the status displays. Verification Create a user on the primary site. You see that the user syncs to the secondary site.
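Many of the dashboard operations in this chapter have radosgw-admin equivalents that are convenient for scripting or for verifying what the dashboard reports. The following is a minimal sketch, run from inside the cephadm shell; the user ID testuser is a placeholder.
Create an Object Gateway user and list the buckets it owns:
radosgw-admin user create --uid=testuser --display-name="Test User"
radosgw-admin bucket list --uid=testuser
Display the user's details, including keys and quota settings:
radosgw-admin user info --uid=testuser
After configuring multi-site, check replication progress from either zone:
radosgw-admin sync status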
[ "cephadm shell", "ceph dashboard set-rgw-credentials", "ceph dashboard set-rgw-api-admin-resource RGW_API_ADMIN_RESOURCE", "ceph dashboard set-rgw-api-admin-resource admin Option RGW_API_ADMIN_RESOURCE updated", "ceph dashboard set-rgw-api-ssl-verify false", "ceph dashboard set-rgw-api-ssl-verify False Option RGW_API_SSL_VERIFY updated", "ceph dashboard set-rest-requests-timeout _TIME_IN_SECONDS_", "ceph dashboard set-rest-requests-timeout 240", "ceph config set <rgw-service> <param> <value> \"rgw_bucket_counters_cache\": \"true\" \"rgw_user_counters_cache\": \"true\"", "https://DASHBOARD_URL:3000", "https://dashboard_url:3000", "ceph dashboard set-rgw-credentials RGW credentials configured", "radosgw-admin user create --uid=\"uid\" --display-name=\"displayname\" --system", "radosgw-admin user modify --uid=\"uid\" --system", "radosgw-admin sync status radosgw-admin user list", "radosgw-admin sync status radosgw-admin user list", "ceph dashboard set-rgw-credentials RGW credentials configured" ]
https://docs.redhat.com/en/documentation/red_hat_ceph_storage/8/html/dashboard_guide/management-of-ceph-object-gateway-using-the-dashboard
Server Administration Guide
Server Administration Guide Red Hat Single Sign-On 7.6 For Use with Red Hat Single Sign-On 7.6 Red Hat Customer Content Services
[ ".../bin/add-user-keycloak.sh -r master -u <username> -p <password>", "> ...\\bin\\add-user-keycloak.bat -r master -u <username> -p <password>", "chgrp jboss /opt/rh/rh-sso7/root/usr/share/keycloak/standalone/configuration/keycloak-add-user.json", ".../bin/add-user-keycloak.sh --sc domain/servers/server-one/configuration -r master -u <username> -p <password>", "> ...\\bin\\add-user-keycloak.bat --sc domain/servers/server-one/configuration -r master -u <username> -p <password>", "----Begin Certificate---- <Output> ----End Certificate----", "keytool -delete -keystore <keystore>.jks -storepass <password> -alias <key>", "keytool -importcert -file domain.crt -keystore <keystore>.jks -storepass <password> -alias <key>", "wildfly:undeploy mvn clean install wildfly:deploy", "When you create LDAP provider, message appear in the server log in the INFO level starting with:", "Creating new LDAP Store for the LDAP storage provider:", "Mapper for provider: XXX, Mapper name: YYY, Provider: ZZZ", "docker run --name freeipa-server-container -it -h server.freeipa.local -e PASSWORD=YOUR_PASSWORD -v /sys/fs/cgroup:/sys/fs/cgroup:ro -v /var/lib/ipa-data:/data:Z freeipa/freeipa-server", "x.x.x.x server.freeipa.local", "ipa-client-install --mkhomedir -p admin -w password", "kinit admin", "ipa user-add <username> --first=<first name> --last=<surname> --email=<email address> --phone=<telephoneNumber> --street=<street> \\ --city=<city> --state=<state> --postalcode=<postal code> --password", "kinit <username>", "kdestroy -A kinit admin", "sudo yum install sssd-dbus", "bin/federation-sssd-setup.sh", "[domain/your-hostname.local] ldap_user_extra_attrs = mail:mail, sn:sn, givenname:givenname, telephoneNumber:telephoneNumber [sssd] services = nss, sudo, pam, ssh, ifp [ifp] allowed_uids = root, yourOSUsername user_attributes = +mail, +telephoneNumber, +givenname, +sn", "sudo dbus-send --print-reply --system --dest=org.freedesktop.sssd.infopipe /org/freedesktop/sssd/infopipe org.freedesktop.sssd.infopipe.GetUserGroups string:john", "[ifp] allowed_uids = root, your_username", "sudo yum install rh-sso7-libunix-dbus-java", "sudo yum install jna", "sudo sssctl user-checks admin -s keycloak", "https://developers.google.com/recaptcha/", "{ \"attributes\": [ { \"name\": \"myattribute\", \"required\": { \"roles\": [ \"user\", \"admin\" ], \"scopes\": [ \"foo\", \"bar\" ] }, \"permissions\": { \"view\": [ \"admin\", \"user\" ], \"edit\": [ \"admin\", \"user\" ] }, \"validations\": { \"email\": {}, \"length\": { \"max\": 255 } }, \"annotations\": { \"myannotation\": \"myannotation-value\" } } ], \"groups\": [ { \"name\": \"personalInfo\", \"displayHeader\": \"Personal Information\" } ] }", "{ \"attributes\": [ { \"name\": \"myattribute\", \"required\": {} ] }", "{ \"attributes\": [ { \"name\": \"myattribute\", \"required\": { \"roles\": [\"user\"] } ] }", "{ \"attributes\": [ { \"name\": \"myattribute\", \"required\": { \"scopes\": [\"foo\"] } ] }", "{ \"attributes\": [ { \"name\": \"myattribute\", \"permissions\": { \"view\": [\"admin\"], \"edit\": [\"user\"] } ] }", "{ \"attributes\": [ { \"name\": \"myattribute\", \"annotations\": { \"foo\": [\"foo-value\"], \"bar\": [\"bar-value\"] } ] }", "\"attributes\": [ { \"name\": \"jobTitle\", \"validations\": { \"options\": { \"options\":[ \"sweng\", \"swarch\" ] } }, \"annotations\": { \"inputType\": \"select\", \"inputOptionLabels\": { \"sweng\": \"Software Engineer\", \"swarch\": \"Software Architect\" } } } ]", "\"attributes\": [ { \"name\": \"jobTitle\", \"validations\": { 
\"options\": { \"options\":[ \"sweng\", \"swarch\" ] } }, \"annotations\": { \"inputType\": \"select-radiobuttons\", \"inputOptionLabels\": { \"sweng\": \"USD{jobtitle.swengineer}\", \"swarch\": \"USD{jobtitle.swarchitect}\" } } } ]", "<subsystem xmlns=\"urn:jboss:domain:keycloak-server:1.2\"> <spi name=\"userSessions\"> <default-provider>infinispan</default-provider> <provider name=\"infinispan\" enabled=\"true\"> <properties> <property name=\"preloadOfflineSessionsFromDatabase\" value=\"true\"/> </properties> </provider> </spi> </subsystem>", "/subsystem=keycloak-server/spi=userSessions:add(default-provider=infinispan) /subsystem=keycloak-server/spi=userSessions/provider=infinispan:add(properties={preloadOfflineSessionsFromDatabase => \"true\"},enabled=true)", "https://{DOMAIN}/auth/realms/{REALMNAME}/protocol/openid-connect/auth?client_id={CLIENT-ID}&redirect_uri={REDIRECT-URI}&scope=openid&response_type=code&response_mode=query&nonce=exg16fxdjcu&claims=%7B%22id_token%22%3A%7B%22acr%22%3A%7B%22essential%22%3Atrue%2C%22values%22%3A%5B%22gold%22%5D%7D%7D%7D", "claims= { \"id_token\": { \"acr\": { \"essential\": true, \"values\": [\"gold\"] } } }", "Deny new session - when a new session is requested and the session limit is reached, no new sessions can be created. Terminate oldest session - when a new session is requested and the session limit has been reached, the oldest session will be removed and the new session created.", "sudo kadmin.local", "addprinc -randkey HTTP/[email protected] ktadd -k /tmp/http.keytab HTTP/[email protected]", "[domain_realm] .mydomain.org = MYDOMAIN.ORG mydomain.org = MYDOMAIN.ORG", "// Obtain accessToken in your application. KeycloakPrincipal keycloakPrincipal = (KeycloakPrincipal) servletReq.getUserPrincipal(); AccessToken accessToken = keycloakPrincipal.getKeycloakSecurityContext().getToken(); // Retrieve Kerberos credential from accessToken and deserialize it String serializedGssCredential = (String) accessToken.getOtherClaims(). get(org.keycloak.common.constants.KerberosConstants.GSS_DELEGATION_CREDENTIAL); GSSCredential deserializedGssCredential = org.keycloak.common.util.KerberosSerializationUtils. deserializeCredential(serializedGssCredential); // Create GSSContext to call other Kerberos-secured services GSSContext context = gssManager.createContext(serviceName, krb5Oid, deserializedGssCredential, GSSContext.DEFAULT_LIFETIME);", "emailAddress=(.*?)(?:,|USD)", "<security-realms> <security-realm name=\"ssl-realm\"> <server-identities> <ssl> <keystore path=\"servercert.jks\" relative-to=\"jboss.server.config.dir\" keystore-password=\"servercert password\"/> </ssl> </server-identities> <authentication> <truststore path=\"truststore.jks\" relative-to=\"jboss.server.config.dir\" keystore-password=\"truststore password\"/> </authentication> </security-realm> </security-realms>", "<subsystem xmlns=\"urn:jboss:domain:undertow:12.0\"> . 
<server name=\"default-server\"> <https-listener name=\"default\" socket-binding=\"https\" security-realm=\"ssl-realm\" verify-client=\"REQUESTED\"/> </server> </subsystem>", "<spi name=\"x509cert-lookup\"> <default-provider>haproxy</default-provider> <provider name=\"haproxy\" enabled=\"true\"> <properties> <property name=\"sslClientCert\" value=\"SSL_CLIENT_CERT\"/> <property name=\"sslCertChainPrefix\" value=\"CERT_CHAIN\"/> <property name=\"certificateChainLength\" value=\"10\"/> </properties> </provider> </spi>", "<spi name=\"x509cert-lookup\"> <default-provider>apache</default-provider> <provider name=\"apache\" enabled=\"true\"> <properties> <property name=\"sslClientCert\" value=\"SSL_CLIENT_CERT\"/> <property name=\"sslCertChainPrefix\" value=\"CERT_CHAIN\"/> <property name=\"certificateChainLength\" value=\"10\"/> </properties> </provider> </spi>", "<spi name=\"x509cert-lookup\"> <default-provider>nginx</default-provider> <provider name=\"nginx\" enabled=\"true\"> <properties> <property name=\"sslClientCert\" value=\"ssl-client-cert\"/> <property name=\"sslCertChainPrefix\" value=\"USELESS\"/> <property name=\"certificateChainLength\" value=\"2\"/> </properties> </provider> </spi>", "server { ssl_client_certificate trusted-ca-list-for-client-auth.pem; ssl_verify_client optional_no_ca; ssl_verify_depth 2; location / { proxy_set_header ssl-client-cert USDssl_client_escaped_cert; } }", "<profile> <subsystem xmlns=\"urn:jboss:domain:logging:8.0\"> <logger category=\"org.keycloak.authentication.authenticators.x509\"> <level name=\"TRACE\"/> </logger> <logger category=\"org.keycloak.services.x509\"> <level name=\"TRACE\"/> </logger>", "LOC=<install-dir>/intermediate1/user-certificate curl --insecure https://localhost:8443/auth/realms/X509_demo/protocol/openid-connect/token --data \"grant_type=password\" -E USDLOC/user1.crt --key USDLOC/user1.key --cacert USDLOC/intermediate-ca-chain.crt -d client_id=account -d client_secret=BNm5AQPJGEtbayIAoiKUetr0lkXKSlF4 | jq % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 2097 100 2013 100 84 25481 1063 -::- -::- -::- 26544 { \"access_token\": \"eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJ1OUNpN0tzUjBIOEFCQXEtQ1Z4SEFDSUo1M1hNYWVhclJrYkw4cFd1VW4wIn0.eyJleHAiOjE2Njc4MzA5NjAsImlhdCI6MTY2NzgzMDY2MCwianRpIjoiNDU5YzE2OGMtODU3ZS00OWRjLTgxYjItZjVhM2M3M2MwODMzIiwiaXNzIjoiaHR0cHM6Ly9sb2NhbGhvc3Q6ODQ0My9hdXRoL3JlYWxtcy9YNTA5X2RlbW8iLCJzdWIiOiIwODZiMTgyZC00MzdhLTQzZDItYTRmZS05ZGZmYTNmOTBiZDAiLCJ0eXAiOiJCZWFyZXIiLCJhenAiOiJhY2NvdW50Iiwic2Vzc2lvbl9zdGF0ZSI6ImMwYjNiMTJjLTM5YmEtNGQ0Ni1iNDNlLTZkMTM0MGJmNTA5OCIsImFjciI6IjEiLCJyZXNvdXJjZV9hY2Nlc3MiOnsiYWNjb3VudCI6eyJyb2xlcyI6WyJtYW5hZ2UtYWNjb3VudCIsIm1hbmFnZS1hY2NvdW50LWxpbmtzIiwidmlldy1wcm9maWxlIl19fSwic2NvcGUiOiJwcm9maWxlIGVtYWlsIiwic2lkIjoiYzBiM2IxMmMtMzliYS00ZDQ2LWI0M2UtNmQxMzQwYmY1MDk4IiwiZW1haWxfdmVyaWZpZWQiOmZhbHNlLCJwcmVmZXJyZWRfdXNlcm5hbWUiOiJ1c2VyMSIsImVtYWlsIjoidXNlckByZWRoYXQuY29tIn0.CDtltEkmITloDpqU5alq4U1JopqEJVeoglT-wA43edQ_DfeWSgefL0BIrPlt1SKhFMOVitwyq_9XZvfiS5ZiObE33cDmhr6eohbUtDPibU2GuEIYP9WjlVpZDMaSKQVu5SwM91m6yei22PtH-ApPOBeG4Ru0xZtNXjwGQpuIJEi_H1rZdPY3I4U2lPuQo4Uono5gnF7re_nUvf90FJi0uaOOrsvUhUkj1xEwQ0Diy1oIymcbrDL0Ek7B30StBcjn-fe3-0GpLttLQju0OGTkwD7Eb0UWTKoWAwspMlgpf9NaIGj8rmBsz6eBlGIGWBN2Qg6v3PzbJ2NXKvq435f9Zg\", \"expires_in\": 300, \"refresh_expires_in\": 1800, \"refresh_token\": 
\"eyJhbGciOiJIUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICIyMmFkZDdhMy0xN2RjLTQ5NmQtYTk4NS05YWZhNGZhODVhMTEifQ.eyJleHAiOjE2Njc4MzI0NjAsImlhdCI6MTY2NzgzMDY2MCwianRpIjoiZWU4MjJhMzYtMWEzMS00ZGEzLWIxMGEtNmY1ODkxYmI0MzlhIiwiaXNzIjoiaHR0cHM6Ly9sb2NhbGhvc3Q6ODQ0My9hdXRoL3JlYWxtcy9YNTA5X2RlbW8iLCJhdWQiOiJodHRwczovL2xvY2FsaG9zdDo4NDQzL2F1dGgvcmVhbG1zL1g1MDlfZGVtbyIsInN1YiI6IjA4NmIxODJkLTQzN2EtNDNkMi1hNGZlLTlkZmZhM2Y5MGJkMCIsInR5cCI6IlJlZnJlc2giLCJhenAiOiJhY2NvdW50Iiwic2Vzc2lvbl9zdGF0ZSI6ImMwYjNiMTJjLTM5YmEtNGQ0Ni1iNDNlLTZkMTM0MGJmNTA5OCIsInNjb3BlIjoicHJvZmlsZSBlbWFpbCIsInNpZCI6ImMwYjNiMTJjLTM5YmEtNGQ0Ni1iNDNlLTZkMTM0MGJmNTA5OCJ9.MubgR9rvyrmSOcaq5ce-qVTPenVQye1KsEHJr7nh9-A\", \"token_type\": \"Bearer\", \"not-before-policy\": 0, \"session_state\": \"c0b3b12c-39ba-4d46-b43e-6d1340bf5098\", \"scope\": \"profile email\" }", "deny-role1 = You do not have required role!", "oc create -f <(echo ' kind: OAuthClient apiVersion: v1 metadata: name: kc-client 1 secret: \"...\" 2 redirectURIs: - \"http://www.example.com/\" 3 grantMethod: prompt 4 ')", "curl -s -k -H \"Authorization: Bearer USD(oc whoami -t)\" \\https://<openshift-user-facing-api-url>/apis/config.openshift.io/v1/infrastructures/cluster | jq \".status.apiServerURL\"", "oc create -f <(echo ' kind: OAuthClient apiVersion: oauth.openshift.io/v1 metadata: name: keycloak-broker 1 secret: \"...\" 2 redirectURIs: - \"<copy pasted Redirect URI from OpenShift 4 Identity Providers page>\" 3 grantMethod: prompt 4 ')", "http[s]://{host:port}/auth/realms/{realm-name}/broker/{broker-alias}/endpoint/descriptor", "http[s]://{host:port}/auth/realms/USD{realm-name}/broker/{broker-alias}/login", "GET /myapplication.com?kc_idp_hint=facebook HTTP/1.1 Host: localhost:8080", "const keycloak = new Keycloak('keycloak.json'); keycloak.createLoginUrl({ idpHint: 'facebook' });", "GET /auth/realms/{realm}/broker/{provider_alias}/token HTTP/1.1 Host: localhost:8080 Authorization: Bearer <KEYCLOAK ACCESS TOKEN>", "<spi name=\"ciba-auth-channel\"> <default-provider>ciba-http-auth-channel</default-provider> <provider name=\"ciba-http-auth-channel\" enabled=\"true\"> <properties> <property name=\"httpAuthenticationChannelUri\" value=\"https://backend.internal.example.com/auth\"/> </properties> </provider> </spi>", "POST [delegation_reception]", "POST /auth/realms/[realm]/protocol/openid-connect/ext/ciba/auth/callback", "https://localhost:8080/auth", "https://myhost.com/myapp/k_jwks", "POST /auth/realms/demo/protocol/openid-connect/token Authorization: Basic cHJvZHVjdC1zYS1jbGllbnQ6cGFzc3dvcmQ= Content-Type: application/x-www-form-urlencoded grant_type=client_credentials", "HTTP/1.1 200 OK Content-Type: application/json;charset=UTF-8 Cache-Control: no-store Pragma: no-cache { \"access_token\":\"2YotnFZFEjr1zCsicMWpAA\", \"token_type\":\"bearer\", \"expires_in\":60 }", "\"audience\": [ \"<trusted service>\" ]", "root/auth/realms/{realm}/protocol/saml/descriptor", "http://host:port/auth/realms/master/clients/account/redirect", "scope=openid phone", "**USD{vault.**_key_**}**", "<spi name=\"vault\"> <default-provider>files-plaintext</default-provider> <provider name=\"files-plaintext\" enabled=\"true\"> <properties> <property name=\"dir\" value=\"USD{jboss.home.dir}/standalone/configuration/vault/\" /> </properties> </provider> </spi>", "/subsystem=keycloak-server/spi=vault/:add /subsystem=keycloak-server/spi=vault/provider=files-plaintext/:add(enabled=true,properties={dir => \"USD{jboss.home.dir}/standalone/configuration/vault\"}) 
/subsystem=keycloak-server/spi=vault:write-attribute(name=default-provider,value=files-plaintext)", "<spi name=\"vault\"> <default-provider>elytron-cs-keystore</default-provider> <provider name=\"elytron-cs-keystore\" enabled=\"true\"> <properties> <property name=\"location\" value=\"USD{jboss.home.dir}/standalone/configuration/vault/credential-store.jceks\" /> <property name=\"secret\" value=\"secretpw1!\"/> </properties> </provider> </spi>", "<spi name=\"vault\"> <default-provider>elytron-cs-keystore</default-provider> <provider name=\"elytron-cs-keystore\" enabled=\"true\"> <properties> <property name=\"location\" value=\"USD{jboss.home.dir}/standalone/configuration/vault/credential-store.p12\" /> <property name=\"secret\" value=\"secretpw1!\"/> <property name=\"keyStoreType\" value=\"PKCS12\"/> </properties> </provider> </spi>", "<spi name=\"vault\"> <property name=\"secret\" value=\"MASK-3u2HNQaMogJJ8VP7J6gRIl;12345678;321\"/> </spi>", "<spi name=\"vault\"> <default-provider>files-plaintext</default-provider> <provider name=\"files-plaintext\" enabled=\"true\"> <properties> <property name=\"dir\" value=\"USD{jboss.home.dir}/standalone/configuration/vault/\" /> <property name=\"keyResolvers\" value=\"REALM_UNDERSCORE_KEY, KEY_ONLY\"/> </properties> </provider> </spi>", "[standalone@localhost:9990 /] /subsystem=elytron/credential-store=test-store:add(create=true, location=/home/test/test-store.p12, credential-reference={clear-text=testpwd1!},implementation-properties={keyStoreType=PKCS12})", "/subsystem=elytron/credential-store=test-store:add-alias(alias=ldaptest_ldap__secret,secret-value=secret12)", "keytool -list -keystore /home/test/test-store.p12 -storetype PKCS12 -storepass testpwd1! Keystore type: PKCS12 Keystore provider: SUN Your keystore contains 1 entries ldaptest_ldap__secret/passwordcredential/clear/, Oct 12, 2020, SecretKeyEntry,", "/subsystem=keycloak-server/spi=vault:add(default-provider=elytron-cs-keystore) /subsystem=keycloak-server/spi=vault/provider=elytron-cs-keystore:add(enabled=true, properties={location=>/home/test/test-store.p12, secret=>testpwd1!, keyStoreType=>PKCS12})", "<spi name=\"vault\"> <default-provider>elytron-cs-keystore</default-provider> <provider name=\"elytron-cs-keystore\" enabled=\"true\"> <properties> <property name=\"location\" value=\"/home/test/test-store.p12\"/> <property name=\"secret\" value=\"testpwd1!\"/> <property name=\"keyStoreType\" value=\"PKCS12\"/> </properties> </provider> </spi> <credential-stores> <credential-store name=\"test-store\" location=\"/home/test/test-store.p12\" create=\"true\"> <implementation-properties> <property name=\"keyStoreType\" value=\"PKCS12\"/> </implementation-properties> <credential-reference clear-text=\"testpwd1!\"/> </credential-store> </credential-stores>", "EAP_HOME/bin/elytron-tool.sh mask --salt SALT --iteration ITERATION_COUNT --secret PASSWORD", "elytron-tool.sh mask --salt 12345678 --iteration 123 --secret testpwd1! 
MASK-3BUbFEyWu0lRAu8.fCqyUk;12345678;123", "/subsystem=elytron/credential-store=cs-store:write-attribute(name=credential-reference.clear-text,value=\"MASK-3BUbFEyWu0lRAu8.fCqyUk;12345678;123\")", "/subsystem=keycloak-server/spi=vault/provider=elytron-cs-keystore:remove() /subsystem=keycloak-server/spi=vault/provider=elytron-cs-keystore:add(enabled=true, properties={location=>/home/test/test-store.p12, secret=>\"MASK-3BUbFEyWu0lRAu8.fCqyUk;12345678;123\", keyStoreType=>PKCS12})", "<spi name=\"vault\"> <default-provider>elytron-cs-keystore</default-provider> <provider name=\"elytron-cs-keystore\" enabled=\"true\"> <properties> <property name=\"location\" value=\"/home/test/test-store.p12\"/> <property name=\"secret\" value=\"MASK-3BUbFEyWu0lRAu8.fCqyUk;12345678;123\"/> <property name=\"keyStoreType\" value=\"PKCS12\"/> </properties> </provider> </spi> . .. <credential-stores> <credential-store name=\"test-store\" location=\"/home/test/test-store.p12\" create=\"true\"> <implementation-properties> <property name=\"keyStoreType\" value=\"PKCS12\"/> </implementation-properties> <credential-reference clear-text=\"MASK-3BUbFEyWu0lRAu8.fCqyUk;12345678;123\"/> </credential-store> </credential-stores>", "11:36:09,965 WARN [org.keycloak.events] (default task-51) type=LOGIN_ERROR, realmId=master, clientId=myapp, userId=19aeb848-96fc-44f6-b0a3-59a17570d374, ipAddress=127.0.0.1, error=invalid_user_credentials, auth_method=openid-connect, auth_type=code, redirect_uri=http://localhost:8180/myapp, code_id=b669da14-cdbb-41d0-b055-0810a0334607, username=admin", "<subsystem xmlns=\"urn:jboss:domain:logging:...\"> <logger category=\"org.keycloak.events\"> <level name=\"DEBUG\"/> </logger> </subsystem>", "<subsystem xmlns=\"urn:jboss:domain:keycloak-server:...\"> <spi name=\"eventsListener\"> <provider name=\"jboss-logging\" enabled=\"true\"> <properties> <property name=\"success-level\" value=\"info\"/> <property name=\"error-level\" value=\"error\"/> </properties> </provider> </spi> </subsystem>", "<spi name=\"eventsListener\"> <provider name=\"email\" enabled=\"true\"> <properties> <property name=\"exclude-events\" value=\"[&quot;UPDATE_TOTP&quot;,&quot;REMOVE_TOTP&quot;]\"/> </properties> </provider> </spi>", "<spi name=\"eventsStore\"> <provider name=\"jpa\" enabled=\"true\"> <properties> <property name=\"max-detail-length\" value=\"1000\"/> </properties> </provider> </spi>", "bin/standalone.sh -Dkeycloak.migration.action=export -Dkeycloak.migration.provider=dir -Dkeycloak.migration.dir=<DIR TO EXPORT TO>", "bin/standalone.sh -Dkeycloak.migration.action=export -Dkeycloak.migration.provider=singleFile -Dkeycloak.migration.file=<FILE TO EXPORT TO>", "bin/standalone.sh -Dkeycloak.migration.action=import -Dkeycloak.migration.provider=singleFile -Dkeycloak.migration.file=<FILE TO IMPORT> -Dkeycloak.migration.strategy=OVERWRITE_EXISTING", "<subsystem xmlns=\"urn:jboss:domain:undertow:12.0\"> <server name=\"default-server\"> <host name=\"default-host\" alias=\"localhost\"> <filter-ref name=\"ipAccess\"/> </host> </server> <filters> <expression-filter name=\"ipAccess\" expression=\"path-prefix('/auth/admin') -> ip-access-control(acl={'10.0.0.0/24 allow'})\"/> </filters> </subsystem>", "/subsystem=undertow/configuration=filter/expression-filter=ipAccess:add(,expression=\"path-prefix[/auth/admin] -> ip-access-control(acl={'10.0.0.0/24 allow'})\") /subsystem=undertow/server=default-server/host=default-host/filter-ref=ipAccess:add()", "<subsystem xmlns=\"urn:jboss:domain:undertow:12.0\"> <server 
name=\"default-server\"> <https-listener name=\"https\" socket-binding=\"https\" security-realm=\"ApplicationRealm\" enable-http2=\"true\"/> <https-listener name=\"https-admin\" socket-binding=\"https-admin\" security-realm=\"ApplicationRealm\" enable-http2=\"true\"/> <host name=\"default-host\" alias=\"localhost\"> <filter-ref name=\"portAccess\"/> </host> </server> <filters> <expression-filter name=\"portAccess\" expression=\"path-prefix('/auth/admin') and not equals(%p, 8444) -> response-code(403)\"/> </filters> </subsystem> <socket-binding-group name=\"standard-sockets\" default-interface=\"public\" port-offset=\"USD{jboss.socket.binding.port-offset:0}\"> <socket-binding name=\"https\" port=\"USD{jboss.https.port:8443}\"/> <socket-binding name=\"https-admin\" port=\"USD{jboss.https.port:8444}\"/> </socket-binding-group>", "/socket-binding-group=standard-sockets/socket-binding=https-admin/:add(port=8444) /subsystem=undertow/server=default-server/https-listener=https-admin:add(socket-binding=https-admin, security-realm=ApplicationRealm, enable-http2=true) /subsystem=undertow/configuration=filter/expression-filter=portAccess:add(,expression=\"path-prefix('/auth/admin') and not equals(%p, 8444) -> response-code(403)\") /subsystem=undertow/server=default-server/host=default-host/filter-ref=portAccess:add()", "<spi name=\"userProfile\"> <provider name=\"legacy-user-profile\" enabled=\"true\"> <properties> <property name=\"read-only-attributes\" value=\"[&quot;foo&quot;,&quot;bar*&quot;]\"/> <property name=\"admin-read-only-attributes\" value=\"[&quot;foo&quot;]\"/> </properties> </provider> </spi>", "/subsystem=keycloak-server/spi=userProfile/:add /subsystem=keycloak-server/spi=userProfile/provider=legacy-user-profile/:add(properties={},enabled=true) /subsystem=keycloak-server/spi=userProfile/provider=legacy-user-profile/:map-put(name=properties,key=read-only-attributes,value=[foo,bar*]) /subsystem=keycloak-server/spi=userProfile/provider=legacy-user-profile/:map-put(name=properties,key=admin-read-only-attributes,value=[foo])", "<subsystem xmlns=\"urn:jboss:domain:keycloak-server:1.2\"> <spi name=\"authenticationSessions\"> <default-provider>infinispan</default-provider> <provider name=\"infinispan\" enabled=\"true\"> <properties> <property name=\"authSessionsLimit\" value=\"100\"/> </properties> </provider> </spi> </subsystem>", "/subsystem=keycloak-server/spi=authenticationSessions:add(default-provider=infinispan) /subsystem=keycloak-server/spi=authenticationSessions/provider=infinispan:add(properties={authSessionsLimit => \"100\"},enabled=true)", "export PATH=USDPATH:USDKEYCLOAK_HOME/bin kcadm.sh", "c:\\> set PATH=%PATH%;%KEYCLOAK_HOME%\\bin c:\\> kcadm", "kcadm.sh config credentials --server http://localhost:8080/auth --realm demo --user admin --client admin kcadm.sh create realms -s realm=demorealm -s enabled=true -o CID=USD(kcadm.sh create clients -r demorealm -s clientId=my_client -s 'redirectUris=[\"http://localhost:8980/myapp/*\"]' -i) kcadm.sh get clients/USDCID/installation/providers/keycloak-oidc-keycloak-json", "c:\\> kcadm config credentials --server http://localhost:8080/auth --realm demo --user admin --client admin c:\\> kcadm create realms -s realm=demorealm -s enabled=true -o c:\\> kcadm create clients -r demorealm -s clientId=my_client -s \"redirectUris=[\\\"http://localhost:8980/myapp/*\\\"]\" -i > clientid.txt c:\\> set /p CID=<clientid.txt c:\\> kcadm get clients/%CID%/installation/providers/keycloak-oidc-keycloak-json", "kcadm.sh config truststore --trustpass 
USDPASSWORD ~/.keycloak/truststore.jks", "c:\\> kcadm config truststore --trustpass %PASSWORD% %HOMEPATH%\\.keycloak\\truststore.jks", "kcadm.sh config credentials --server http://localhost:8080/auth --realm master --user admin --password admin", "kcadm.sh get realms --no-config --server http://localhost:8080/auth --realm master --user admin --password admin", "kcadm.sh create ENDPOINT [ARGUMENTS] kcadm.sh get ENDPOINT [ARGUMENTS] kcadm.sh update ENDPOINT [ARGUMENTS] kcadm.sh delete ENDPOINT [ARGUMENTS]", "SERVER_URI/admin/realms/REALM/ENDPOINT", "SERVER_URI/admin/realms", "SERVER_URI/admin/realms/TARGET_REALM/ENDPOINT", "kcadm.sh config credentials --server http://localhost:8080/auth --realm master --user admin --password admin kcadm.sh create users -s username=testuser -s enabled=true -r demorealm", "kcadm.sh get realms/demorealm > demorealm.json vi demorealm.json kcadm.sh update realms/demorealm -f demorealm.json", "kcadm.sh update realms/demorealm -s enabled=false", "kcadm.sh create realms -s realm=demorealm -s enabled=true", "kcadm.sh create realms -f demorealm.json", "kcadm.sh create realms -f - << EOF { \"realm\": \"demorealm\", \"enabled\": true } EOF", "c:\\> echo { \"realm\": \"demorealm\", \"enabled\": true } | kcadm create realms -f -", "kcadm.sh get realms", "kcadm.sh get realms --fields realm,enabled", "kcadm.sh get realms --fields realm --format csv --noquotes", "kcadm.sh get realms/master", "kcadm.sh update realms/demorealm -s enabled=false", "kcadm.sh get realms/demorealm > demorealm.json vi demorealm.json kcadm.sh update realms/demorealm -f demorealm.json", "kcadm.sh delete realms/demorealm", "kcadm.sh update realms/demorealm -s registrationAllowed=true -s registrationEmailAsUsername=true -s rememberMe=true -s verifyEmail=true -s resetPasswordAllowed=true -s editUsernameAllowed=true", "kcadm.sh get keys -r demorealm", "kcadm.sh get realms/demorealm --fields id --format csv --noquotes", "kcadm.sh create components -r demorealm -s name=rsa-generated -s providerId=rsa-generated -s providerType=org.keycloak.keys.KeyProvider -s parentId=959844c1-d149-41d7-8359-6aa527fca0b0 -s 'config.priority=[\"101\"]' -s 'config.enabled=[\"true\"]' -s 'config.active=[\"true\"]' -s 'config.keySize=[\"2048\"]'", "c:\\> kcadm create components -r demorealm -s name=rsa-generated -s providerId=rsa-generated -s providerType=org.keycloak.keys.KeyProvider -s parentId=959844c1-d149-41d7-8359-6aa527fca0b0 -s \"config.priority=[\\\"101\\\"]\" -s \"config.enabled=[\\\"true\\\"]\" -s \"config.active=[\\\"true\\\"]\" -s \"config.keySize=[\\\"2048\\\"]\"", "kcadm.sh create components -r demorealm -s name=java-keystore -s providerId=java-keystore -s providerType=org.keycloak.keys.KeyProvider -s parentId=959844c1-d149-41d7-8359-6aa527fca0b0 -s 'config.priority=[\"101\"]' -s 'config.enabled=[\"true\"]' -s 'config.active=[\"true\"]' -s 'config.keystore=[\"/opt/keycloak/keystore.jks\"]' -s 'config.keystorePassword=[\"secret\"]' -s 'config.keyPassword=[\"secret\"]' -s 'config.keyAlias=[\"localhost\"]'", "c:\\> kcadm create components -r demorealm -s name=java-keystore -s providerId=java-keystore -s providerType=org.keycloak.keys.KeyProvider -s parentId=959844c1-d149-41d7-8359-6aa527fca0b0 -s \"config.priority=[\\\"101\\\"]\" -s \"config.enabled=[\\\"true\\\"]\" -s \"config.active=[\\\"true\\\"]\" -s \"config.keystore=[\\\"/opt/keycloak/keystore.jks\\\"]\" -s \"config.keystorePassword=[\\\"secret\\\"]\" -s \"config.keyPassword=[\\\"secret\\\"]\" -s \"config.keyAlias=[\\\"localhost\\\"]\"", "kcadm.sh get keys -r 
demorealm", "kcadm.sh update components/PROVIDER_ID -r demorealm -s 'config.active=[\"false\"]'", "c:\\> kcadm update components/PROVIDER_ID -r demorealm -s \"config.active=[\\\"false\\\"]\"", "kcadm.sh get keys -r demorealm", "kcadm.sh delete components/PROVIDER_ID -r demorealm", "kcadm.sh update events/config -r demorealm -s 'eventsListeners=[\"jboss-logging\"]'", "c:\\> kcadm update events/config -r demorealm -s \"eventsListeners=[\\\"jboss-logging\\\"]\"", "kcadm.sh update events/config -r demorealm -s eventsEnabled=true -s 'enabledEventTypes=[\"LOGIN_ERROR\",\"REGISTER_ERROR\",\"LOGOUT_ERROR\",\"CODE_TO_TOKEN_ERROR\",\"CLIENT_LOGIN_ERROR\",\"FEDERATED_IDENTITY_LINK_ERROR\",\"REMOVE_FEDERATED_IDENTITY_ERROR\",\"UPDATE_EMAIL_ERROR\",\"UPDATE_PROFILE_ERROR\",\"UPDATE_PASSWORD_ERROR\",\"UPDATE_TOTP_ERROR\",\"VERIFY_EMAIL_ERROR\",\"REMOVE_TOTP_ERROR\",\"SEND_VERIFY_EMAIL_ERROR\",\"SEND_RESET_PASSWORD_ERROR\",\"SEND_IDENTITY_PROVIDER_LINK_ERROR\",\"RESET_PASSWORD_ERROR\",\"IDENTITY_PROVIDER_FIRST_LOGIN_ERROR\",\"IDENTITY_PROVIDER_POST_LOGIN_ERROR\",\"CUSTOM_REQUIRED_ACTION_ERROR\",\"EXECUTE_ACTIONS_ERROR\",\"CLIENT_REGISTER_ERROR\",\"CLIENT_UPDATE_ERROR\",\"CLIENT_DELETE_ERROR\"]' -s eventsExpiration=172800", "c:\\> kcadm update events/config -r demorealm -s eventsEnabled=true -s \"enabledEventTypes=[\\\"LOGIN_ERROR\\\",\\\"REGISTER_ERROR\\\",\\\"LOGOUT_ERROR\\\",\\\"CODE_TO_TOKEN_ERROR\\\",\\\"CLIENT_LOGIN_ERROR\\\",\\\"FEDERATED_IDENTITY_LINK_ERROR\\\",\\\"REMOVE_FEDERATED_IDENTITY_ERROR\\\",\\\"UPDATE_EMAIL_ERROR\\\",\\\"UPDATE_PROFILE_ERROR\\\",\\\"UPDATE_PASSWORD_ERROR\\\",\\\"UPDATE_TOTP_ERROR\\\",\\\"VERIFY_EMAIL_ERROR\\\",\\\"REMOVE_TOTP_ERROR\\\",\\\"SEND_VERIFY_EMAIL_ERROR\\\",\\\"SEND_RESET_PASSWORD_ERROR\\\",\\\"SEND_IDENTITY_PROVIDER_LINK_ERROR\\\",\\\"RESET_PASSWORD_ERROR\\\",\\\"IDENTITY_PROVIDER_FIRST_LOGIN_ERROR\\\",\\\"IDENTITY_PROVIDER_POST_LOGIN_ERROR\\\",\\\"CUSTOM_REQUIRED_ACTION_ERROR\\\",\\\"EXECUTE_ACTIONS_ERROR\\\",\\\"CLIENT_REGISTER_ERROR\\\",\\\"CLIENT_UPDATE_ERROR\\\",\\\"CLIENT_DELETE_ERROR\\\"]\" -s eventsExpiration=172800", "kcadm.sh update events/config -r demorealm -s enabledEventTypes=[]", "kcadm.sh update events/config -r demorealm -s adminEventsEnabled=true -s adminEventsDetailsEnabled=true", "kcadm.sh get events --offset 0 --limit 100", "kcadm delete events", "kcadm.sh create clear-realm-cache -r demorealm -s realm=demorealm kcadm.sh create clear-user-cache -r demorealm -s realm=demorealm kcadm.sh create clear-keys-cache -r demorealm -s realm=demorealm", "kcadm.sh create partialImport -r demorealm2 -s ifResourceExists=FAIL -o -f demorealm.json", "kcadm.sh create realms -s realm=demorealm2 -s enabled=true", "kcadm.sh create roles -r demorealm -s name=user -s 'description=Regular user with a limited set of permissions'", "kcadm.sh get clients -r demorealm --fields id,clientId", "kcadm.sh create clients/a95b6af3-0bdc-4878-ae2e-6d61a4eca9a0/roles -r demorealm -s name=editor -s 'description=Editor can edit, and publish any article'", "kcadm.sh get roles -r demorealm", "kcadm.sh get-roles -r demorealm", "kcadm.sh get-roles -r demorealm --cclientid realm-management", "kcadm.sh get roles/user -r demorealm", "kcadm.sh get-roles -r demorealm --rolename user", "kcadm.sh get-roles -r demorealm --cclientid realm-management --rolename manage-clients", "kcadm.sh update roles/user -r demorealm -s 'description=Role representing a regular user'", "kcadm.sh update clients/a95b6af3-0bdc-4878-ae2e-6d61a4eca9a0/roles/editor -r demorealm -s 'description=User that can edit, 
and publish articles'", "kcadm.sh delete roles/user -r demorealm", "kcadm.sh delete clients/a95b6af3-0bdc-4878-ae2e-6d61a4eca9a0/roles/editor -r demorealm", "kcadm.sh get-roles -r demorealm --rname testrole", "kcadm.sh get-roles -r demorealm --rname testrole --effective", "kcadm.sh get-roles -r demorealm --rname testrole --available", "kcadm.sh get-roles -r demorealm --rname testrole --cclientid realm-management", "kcadm.sh get-roles -r demorealm --rname testrole --cclientid realm-management --effective", "kcadm.sh get-roles -r demorealm --rname testrole --cclientid realm-management --available", "kcadm.sh add-roles --rname testrole --rolename user -r demorealm", "kcadm.sh remove-roles --rname testrole --rolename user -r demorealm", "kcadm.sh add-roles -r demorealm --rname testrole --cclientid realm-management --rolename create-client --rolename view-users", "kcadm.sh get-roles -r demorealm --cclientid test-client --rolename operations", "kcadm.sh add-roles -r demorealm --cclientid test-client --rid fc400897-ef6a-4e8c-872b-1581b7fa8a71 --rolename support", "kcadm.sh get-roles --rid fc400897-ef6a-4e8c-872b-1581b7fa8a71 --all", "kcadm.sh remove-roles -r demorealm --rname testrole --cclientid realm-management --rolename create-client --rolename view-users", "kcadm.sh add-roles -r demorealm --gname Group --cclientid realm-management --rolename create-client --rolename view-users", "kcadm.sh remove-roles -r demorealm --gname Group --cclientid realm-management --rolename create-client --rolename view-users", "kcadm.sh create clients -r demorealm -s clientId=myapp -s enabled=true", "kcadm.sh create clients -r demorealm -s clientId=myapp -s enabled=true -s clientAuthenticatorType=client-secret -s secret=d0b8122f-8dfb-46b7-b68a-f5cc4e25d000", "kcadm.sh get clients -r demorealm --fields id,clientId", "kcadm.sh get clients/c7b8547f-e748-4333-95d0-410b76b3f4a3 -r demorealm", "kcadm.sh get clients/USDCID/client-secret", "kcadm.sh create clients/USDCID/client-secret", "kcadm.sh update clients/USDCID -s \"secret=newSecret\"", "kcadm.sh get clients/c7b8547f-e748-4333-95d0-410b76b3f4a3/installation/providers/keycloak-oidc-keycloak-json -r demorealm", "kcadm.sh get clients/c7b8547f-e748-4333-95d0-410b76b3f4a3/installation/providers/keycloak-oidc-jboss-subsystem -r demorealm", "kcadm.sh get http://localhost:8080/auth/admin/realms/demorealm/clients/8f271c35-44e3-446f-8953-b0893810ebe7/installation/providers/docker-v2-compose-yaml -r demorealm > keycloak-docker-compose-yaml.zip", "kcadm.sh update clients/c7b8547f-e748-4333-95d0-410b76b3f4a3 -r demorealm -s enabled=false -s publicClient=true -s 'redirectUris=[\"http://localhost:8080/myapp/*\"]' -s baseUrl=http://localhost:8080/myapp -s adminUrl=http://localhost:8080/myapp", "c:\\> kcadm update clients/c7b8547f-e748-4333-95d0-410b76b3f4a3 -r demorealm -s enabled=false -s publicClient=true -s \"redirectUris=[\\\"http://localhost:8080/myapp/*\\\"]\" -s baseUrl=http://localhost:8080/myapp -s adminUrl=http://localhost:8080/myapp", "kcadm.sh delete clients/c7b8547f-e748-4333-95d0-410b76b3f4a3 -r demorealm", "kcadm.sh create users -r demorealm -s username=testuser -s enabled=true", "kcadm.sh get users -r demorealm --offset 0 --limit 1000", "kcadm.sh get users -r demorealm -q email=google.com kcadm.sh get users -r demorealm -q username=testuser", "kcadm.sh get users/0ba7a3fd-6fd8-48cd-a60b-2e8fd82d56e2 -r demorealm", "kcadm.sh update users/0ba7a3fd-6fd8-48cd-a60b-2e8fd82d56e2 -r demorealm -s 
'requiredActions=[\"VERIFY_EMAIL\",\"UPDATE_PROFILE\",\"CONFIGURE_TOTP\",\"UPDATE_PASSWORD\"]'", "c:\\> kcadm update users/0ba7a3fd-6fd8-48cd-a60b-2e8fd82d56e2 -r demorealm -s \"requiredActions=[\\\"VERIFY_EMAIL\\\",\\\"UPDATE_PROFILE\\\",\\\"CONFIGURE_TOTP\\\",\\\"UPDATE_PASSWORD\\\"]\"", "kcadm.sh delete users/0ba7a3fd-6fd8-48cd-a60b-2e8fd82d56e2 -r demorealm", "kcadm.sh set-password -r demorealm --username testuser --new-password NEWPASSWORD --temporary", "kcadm.sh update users/0ba7a3fd-6fd8-48cd-a60b-2e8fd82d56e2/reset-password -r demorealm -s type=password -s value=NEWPASSWORD -s temporary=true -n", "kcadm.sh get-roles -r demorealm --uusername testuser", "kcadm.sh get-roles -r demorealm --uusername testuser --effective", "kcadm.sh get-roles -r demorealm --uusername testuser --available", "kcadm.sh get-roles -r demorealm --uusername testuser --cclientid realm-management", "kcadm.sh get-roles -r demorealm --uusername testuser --cclientid realm-management --effective", "kcadm.sh get-roles -r demorealm --uusername testuser --cclientid realm-management --available", "kcadm.sh add-roles --uusername testuser --rolename user -r demorealm", "kcadm.sh remove-roles --uusername testuser --rolename user -r demorealm", "kcadm.sh add-roles -r demorealm --uusername testuser --cclientid realm-management --rolename create-client --rolename view-users", "kcadm.sh remove-roles -r demorealm --uusername testuser --cclientid realm-management --rolename create-client --rolename view-users", "USDkcadm get users/6da5ab89-3397-4205-afaa-e201ff638f9e/sessions", "kcadm.sh delete sessions/d0eaa7cc-8c5d-489d-811a-69d3c4ec84d1", "kcadm.sh create users/6da5ab89-3397-4205-afaa-e201ff638f9e/logout -r demorealm -s realm=demorealm -s user=6da5ab89-3397-4205-afaa-e201ff638f9e", "kcadm.sh create groups -r demorealm -s name=Group", "kcadm.sh get groups -r demorealm", "kcadm.sh get groups/51204821-0580-46db-8f2d-27106c6b5ded -r demorealm", "kcadm.sh update groups/51204821-0580-46db-8f2d-27106c6b5ded -s 'attributes.email=[\"[email protected]\"]' -r demorealm", "kcadm.sh delete groups/51204821-0580-46db-8f2d-27106c6b5ded -r demorealm", "kcadm.sh create groups/51204821-0580-46db-8f2d-27106c6b5ded/children -r demorealm -s name=SubGroup", "kcadm.sh create groups/51204821-0580-46db-8f2d-27106c6b5ded/children -r demorealm -s id=08d410c6-d585-4059-bb07-54dcb92c5094 -s name=SubGroup", "kcadm.sh get users/b544f379-5fc4-49e5-8a8d-5cfb71f46f53/groups -r demorealm", "kcadm.sh update users/b544f379-5fc4-49e5-8a8d-5cfb71f46f53/groups/ce01117a-7426-4670-a29a-5c118056fe20 -r demorealm -s realm=demorealm -s userId=b544f379-5fc4-49e5-8a8d-5cfb71f46f53 -s groupId=ce01117a-7426-4670-a29a-5c118056fe20 -n", "kcadm.sh delete users/b544f379-5fc4-49e5-8a8d-5cfb71f46f53/groups/ce01117a-7426-4670-a29a-5c118056fe20 -r demorealm", "kcadm.sh get-roles -r demorealm --gname Group", "kcadm.sh get-roles -r demorealm --gname Group --effective", "kcadm.sh get-roles -r demorealm --gname Group --available", "kcadm.sh get-roles -r demorealm --gname Group --cclientid realm-management", "kcadm.sh get-roles -r demorealm --gname Group --cclientid realm-management --effective", "kcadm.sh get-roles -r demorealm --gname Group --cclientid realm-management --available", "kcadm.sh get serverinfo -r demorealm --fields 'identityProviders(*)'", "kcadm.sh get identity-provider/instances -r demorealm --fields alias,providerId,enabled", "kcadm.sh get identity-provider/instances/facebook -r demorealm", "kcadm.sh delete identity-provider/instances/facebook -r demorealm", "kcadm.sh 
create identity-provider/instances -r demorealm -s alias=keycloak-oidc -s providerId=keycloak-oidc -s enabled=true -s 'config.useJwksUrl=\"true\"' -s config.authorizationUrl=http://localhost:8180/auth/realms/demorealm/protocol/openid-connect/auth -s config.tokenUrl=http://localhost:8180/auth/realms/demorealm/protocol/openid-connect/token -s config.clientId=demo-oidc-provider -s config.clientSecret=secret", "kcadm.sh create identity-provider/instances -r demorealm -s alias=saml -s providerId=saml -s enabled=true -s 'config.useJwksUrl=\"true\"' -s config.singleSignOnServiceUrl=http://localhost:8180/auth/realms/saml-broker-realm/protocol/saml -s config.nameIDPolicyFormat=urn:oasis:names:tc:SAML:2.0:nameid-format:persistent -s config.signatureAlgorithm=RSA_SHA256", "kcadm.sh create identity-provider/instances -r demorealm -s alias=facebook -s providerId=facebook -s enabled=true -s 'config.useJwksUrl=\"true\"' -s config.clientId=FACEBOOK_CLIENT_ID -s config.clientSecret=FACEBOOK_CLIENT_SECRET", "kcadm.sh create identity-provider/instances -r demorealm -s alias=google -s providerId=google -s enabled=true -s 'config.useJwksUrl=\"true\"' -s config.clientId=GOOGLE_CLIENT_ID -s config.clientSecret=GOOGLE_CLIENT_SECRET", "kcadm.sh create identity-provider/instances -r demorealm -s alias=google -s providerId=google -s enabled=true -s 'config.useJwksUrl=\"true\"' -s config.clientId=TWITTER_API_KEY -s config.clientSecret=TWITTER_API_SECRET", "kcadm.sh create identity-provider/instances -r demorealm -s alias=github -s providerId=github -s enabled=true -s 'config.useJwksUrl=\"true\"' -s config.clientId=GITHUB_CLIENT_ID -s config.clientSecret=GITHUB_CLIENT_SECRET", "kcadm.sh create identity-provider/instances -r demorealm -s alias=linkedin -s providerId=linkedin -s enabled=true -s 'config.useJwksUrl=\"true\"' -s config.clientId=LINKEDIN_CLIENT_ID -s config.clientSecret=LINKEDIN_CLIENT_SECRET", "kcadm.sh create identity-provider/instances -r demorealm -s alias=microsoft -s providerId=microsoft -s enabled=true -s 'config.useJwksUrl=\"true\"' -s config.clientId=MICROSOFT_APP_ID -s config.clientSecret=MICROSOFT_PASSWORD", "kcadm.sh create identity-provider/instances -r demorealm -s alias=stackoverflow -s providerId=stackoverflow -s enabled=true -s 'config.useJwksUrl=\"true\"' -s config.clientId=STACKAPPS_CLIENT_ID -s config.clientSecret=STACKAPPS_CLIENT_SECRET -s config.key=STACKAPPS_KEY", "kcadm.sh create components -r demorealm -s parentId=demorealmId -s id=demokerberos -s name=demokerberos -s providerId=kerberos -s providerType=org.keycloak.storage.UserStorageProvider -s 'config.priority=[\"0\"]' -s 'config.debug=[\"false\"]' -s 'config.allowPasswordAuthentication=[\"true\"]' -s 'config.editMode=[\"UNSYNCED\"]' -s 'config.updateProfileFirstLogin=[\"true\"]' -s 'config.allowKerberosAuthentication=[\"true\"]' -s 'config.kerberosRealm=[\"KEYCLOAK.ORG\"]' -s 'config.keyTab=[\"http.keytab\"]' -s 'config.serverPrincipal=[\"HTTP/[email protected]\"]' -s 'config.cachePolicy=[\"DEFAULT\"]'", "kcadm.sh create components -r demorealm -s name=kerberos-ldap-provider -s providerId=ldap -s providerType=org.keycloak.storage.UserStorageProvider -s parentId=3d9c572b-8f33-483f-98a6-8bb421667867 -s 'config.priority=[\"1\"]' -s 'config.fullSyncPeriod=[\"-1\"]' -s 'config.changedSyncPeriod=[\"-1\"]' -s 'config.cachePolicy=[\"DEFAULT\"]' -s config.evictionDay=[] -s config.evictionHour=[] -s config.evictionMinute=[] -s config.maxLifespan=[] -s 'config.batchSizeForSync=[\"1000\"]' -s 'config.editMode=[\"WRITABLE\"]' -s 
'config.syncRegistrations=[\"false\"]' -s 'config.vendor=[\"other\"]' -s 'config.usernameLDAPAttribute=[\"uid\"]' -s 'config.rdnLDAPAttribute=[\"uid\"]' -s 'config.uuidLDAPAttribute=[\"entryUUID\"]' -s 'config.userObjectClasses=[\"inetOrgPerson, organizationalPerson\"]' -s 'config.connectionUrl=[\"ldap://localhost:10389\"]' -s 'config.usersDn=[\"ou=People,dc=keycloak,dc=org\"]' -s 'config.authType=[\"simple\"]' -s 'config.bindDn=[\"uid=admin,ou=system\"]' -s 'config.bindCredential=[\"secret\"]' -s 'config.searchScope=[\"1\"]' -s 'config.useTruststoreSpi=[\"ldapsOnly\"]' -s 'config.connectionPooling=[\"true\"]' -s 'config.pagination=[\"true\"]' -s 'config.allowKerberosAuthentication=[\"true\"]' -s 'config.serverPrincipal=[\"HTTP/[email protected]\"]' -s 'config.keyTab=[\"http.keytab\"]' -s 'config.kerberosRealm=[\"KEYCLOAK.ORG\"]' -s 'config.debug=[\"true\"]' -s 'config.useKerberosForPasswordAuthentication=[\"true\"]'", "kcadm.sh delete components/3d9c572b-8f33-483f-98a6-8bb421667867 -r demorealm", "kcadm.sh create user-storage/b7c63d02-b62a-4fc1-977c-947d6a09e1ea/sync?action=triggerFullSync", "kcadm.sh create user-storage/b7c63d02-b62a-4fc1-977c-947d6a09e1ea/sync?action=triggerChangedUsersSync", "kcadm.sh create testLDAPConnection -s action=testConnection -s bindCredential=secret -s bindDn=uid=admin,ou=system -s connectionUrl=ldap://localhost:10389 -s useTruststoreSpi=ldapsOnly", "kcadm.sh create testLDAPConnection -s action=testAuthentication -s bindCredential=secret -s bindDn=uid=admin,ou=system -s connectionUrl=ldap://localhost:10389 -s useTruststoreSpi=ldapsOnly", "kcadm.sh create components -r demorealm -s name=hardcoded-ldap-role-mapper -s providerId=hardcoded-ldap-role-mapper -s providerType=org.keycloak.storage.ldap.mappers.LDAPStorageMapper -s parentId=b7c63d02-b62a-4fc1-977c-947d6a09e1ea -s 'config.role=[\"realm-management.create-client\"]'", "kcadm.sh create components -r demorealm -s name=msad-user-account-control-mapper -s providerId=msad-user-account-control-mapper -s providerType=org.keycloak.storage.ldap.mappers.LDAPStorageMapper -s parentId=b7c63d02-b62a-4fc1-977c-947d6a09e1ea", "kcadm.sh create components -r demorealm -s name=user-attribute-ldap-mapper -s providerId=user-attribute-ldap-mapper -s providerType=org.keycloak.storage.ldap.mappers.LDAPStorageMapper -s parentId=b7c63d02-b62a-4fc1-977c-947d6a09e1ea -s 'config.\"user.model.attribute\"=[\"email\"]' -s 'config.\"ldap.attribute\"=[\"mail\"]' -s 'config.\"read.only\"=[\"false\"]' -s 'config.\"always.read.value.from.ldap\"=[\"false\"]' -s 'config.\"is.mandatory.in.ldap\"=[\"false\"]'", "kcadm.sh create components -r demorealm -s name=group-ldap-mapper -s providerId=group-ldap-mapper -s providerType=org.keycloak.storage.ldap.mappers.LDAPStorageMapper -s parentId=b7c63d02-b62a-4fc1-977c-947d6a09e1ea -s 'config.\"groups.dn\"=[]' -s 'config.\"group.name.ldap.attribute\"=[\"cn\"]' -s 'config.\"group.object.classes\"=[\"groupOfNames\"]' -s 'config.\"preserve.group.inheritance\"=[\"true\"]' -s 'config.\"membership.ldap.attribute\"=[\"member\"]' -s 'config.\"membership.attribute.type\"=[\"DN\"]' -s 'config.\"groups.ldap.filter\"=[]' -s 'config.mode=[\"LDAP_ONLY\"]' -s 'config.\"user.roles.retrieve.strategy\"=[\"LOAD_GROUPS_BY_MEMBER_ATTRIBUTE\"]' -s 'config.\"mapped.group.attributes\"=[\"admins-group\"]' -s 'config.\"drop.non.existing.groups.during.sync\"=[\"false\"]' -s 'config.roles=[\"admins\"]' -s 'config.groups=[\"admins-group\"]' -s 'config.group=[]' -s 'config.preserve=[\"true\"]' -s 'config.membership=[\"member\"]'", 
"kcadm.sh create components -r demorealm -s name=full-name-ldap-mapper -s providerId=full-name-ldap-mapper -s providerType=org.keycloak.storage.ldap.mappers.LDAPStorageMapper -s parentId=b7c63d02-b62a-4fc1-977c-947d6a09e1ea -s 'config.\"ldap.full.name.attribute\"=[\"cn\"]' -s 'config.\"read.only\"=[\"false\"]' -s 'config.\"write.only\"=[\"true\"]'", "kcadm.sh update realms/demorealm -s 'passwordPolicy=\"hashIterations and specialChars and upperCase and digits and notUsername and length\"'", "kcadm.sh update realms/demorealm -s 'passwordPolicy=\"hashIterations(25000) and specialChars(2) and upperCase(2) and lowerCase(2) and digits(2) and length(9) and notUsername and passwordHistory(4)\"'", "kcadm.sh get realms/demorealm --fields passwordPolicy", "kcadm.sh get authentication/flows -r demorealm", "kcadm.sh get authentication/flows/febfd772-e1a1-42fb-b8ae-00c0566fafb8 -r demorealm", "kcadm.sh get authentication/flows/Copy%20of%20browser/executions -r demorealm", "kcadm create \"authentication/executions/a3147129-c402-4760-86d9-3f2345e401c7/config\" -r examplerealm -b '{\"config\":{\"x509-cert-auth.mapping-source-selection\":\"Match SubjectDN using regular expression\",\"x509-cert-auth.regular-expression\":\"(.*?)(?:USD)\",\"x509-cert-auth.mapper-selection\":\"Custom Attribute Mapper\",\"x509-cert-auth.mapper-selection.user-attribute-name\":\"usercertificate\",\"x509-cert-auth.crl-checking-enabled\":\"\",\"x509-cert-auth.crldp-checking-enabled\":false,\"x509-cert-auth.crl-relative-path\":\"crl.pem\",\"x509-cert-auth.ocsp-checking-enabled\":\"\",\"x509-cert-auth.ocsp-responder-uri\":\"\",\"x509-cert-auth.keyusage\":\"\",\"x509-cert-auth.extendedkeyusage\":\"\",\"x509-cert-auth.confirmation-page-disallowed\":\"\"},\"alias\":\"my_otp_config\"}'", "kcadm get \"authentication/config/dd91611a-d25c-421a-87e2-227c18421833\" -r examplerealm", "kcadm update \"authentication/config/dd91611a-d25c-421a-87e2-227c18421833\" -r examplerealm -b '{\"id\":\"dd91611a-d25c-421a-87e2-227c18421833\",\"alias\":\"my_otp_config\",\"config\":{\"x509-cert-auth.extendedkeyusage\":\"\",\"x509-cert-auth.mapper-selection.user-attribute-name\":\"usercertificate\",\"x509-cert-auth.ocsp-responder-uri\":\"\",\"x509-cert-auth.regular-expression\":\"(.*?)(?:USD)\",\"x509-cert-auth.crl-checking-enabled\":\"true\",\"x509-cert-auth.confirmation-page-disallowed\":\"\",\"x509-cert-auth.keyusage\":\"\",\"x509-cert-auth.mapper-selection\":\"Custom Attribute Mapper\",\"x509-cert-auth.crl-relative-path\":\"crl.pem\",\"x509-cert-auth.crldp-checking-enabled\":\"false\",\"x509-cert-auth.mapping-source-selection\":\"Match SubjectDN using regular expression\",\"x509-cert-auth.ocsp-checking-enabled\":\"\"}}'", "kcadm delete \"authentication/config/dd91611a-d25c-421a-87e2-227c18421833\" -r examplerealm" ]
https://docs.redhat.com/en/documentation/red_hat_single_sign-on/7.6/html-single/server_administration_guide/index
Introduction to the Migration Toolkit for Applications
Introduction to the Migration Toolkit for Applications Migration Toolkit for Applications 7.2 Introduction to the Migration Toolkit for Applications for managing applications during their migration to OpenShift Container Platform. Red Hat Customer Content Services
null
https://docs.redhat.com/en/documentation/migration_toolkit_for_applications/7.2/html/introduction_to_the_migration_toolkit_for_applications/index
Chapter 4. GFS2 quota management
Chapter 4. GFS2 quota management File system quotas are used to limit the amount of file system space a user or group can use. A user or group does not have a quota limit until one is set. When a GFS2 file system is mounted with the quota=on or quota=account option, GFS2 keeps track of the space used by each user and group even when there are no limits in place. GFS2 updates quota information in a transactional way so system crashes do not require quota usages to be reconstructed. To prevent a performance slowdown, a GFS2 node synchronizes updates to the quota file only periodically. This fuzzy quota accounting can allow users or groups to slightly exceed the set limit. To minimize this, GFS2 dynamically reduces the synchronization period as a hard quota limit is approached. Note GFS2 supports the standard Linux quota facilities. In order to use this, you must install the quota RPM. This is the preferred way to administer quotas on GFS2 and should be used for all new deployments of GFS2 using quotas. For more information about disk quotas, see the man pages of the following commands: quotacheck edquota repquota quota 4.1. Configuring GFS2 disk quotas To implement disk quotas for GFS2 file systems, perform the following three steps: Set up quotas in enforcement or accounting mode. Initialize the quota database file with current block usage information. Assign quota policies. (In accounting mode, these policies are not enforced.) Each of these steps is discussed in detail in the following sections. 4.1.1. Setting up quotas in enforcement or accounting mode In GFS2 file systems, quotas are disabled by default. To enable quotas for a file system, mount the file system with the quota=on option specified. To mount a file system with quotas enabled, specify quota=on for the options argument when creating the GFS2 file system resource in a cluster. For example, the following command specifies that the GFS2 Filesystem resource being created will be mounted with quotas enabled. It is possible to keep track of disk usage and maintain quota accounting for every user and group without enforcing the limit and warn values. To do this, mount the file system with the quota=account option specified. To mount a file system with quotas disabled, specify quota=off for the options argument when creating the GFS2 file system resource in a cluster. 4.1.2. Creating the quota database files After each quota-enabled file system is mounted, the system is capable of working with disk quotas. However, the file system itself is not yet ready to support quotas. The next step is to run the quotacheck command. The quotacheck command examines quota-enabled file systems and builds a table of the current disk usage per file system. The table is then used to update the operating system's copy of disk usage. In addition, the file system's disk quota files are updated. To create the quota files on the file system, use the -u and the -g options of the quotacheck command; both of these options must be specified for user and group quotas to be initialized. For example, if quotas are enabled for the /home file system, create the files in the /home directory: 4.1.3. Assigning quotas per user The last step is assigning the disk quotas with the edquota command. Note that if you have mounted your file system in accounting mode (with the quota=account option specified), the quotas are not enforced.
To configure the quota for a user, as root in a shell prompt, execute the command: Perform this step for each user who needs a quota. For example, if a quota is enabled for the /home partition ( /dev/VolGroup00/LogVol02 in the example below) and the command edquota testuser is executed, the following is shown in the editor configured as the default for the system: Note The text editor defined by the EDITOR environment variable is used by edquota . To change the editor, set the EDITOR environment variable in your ~/.bash_profile file to the full path of the editor of your choice. The first column is the name of the file system that has a quota enabled for it. The second column shows how many blocks the user is currently using. The next two columns are used to set soft and hard block limits for the user on the file system. The soft block limit defines the maximum amount of disk space that can be used. The hard block limit is the absolute maximum amount of disk space that a user or group can use. Once this limit is reached, no further disk space can be used. The GFS2 file system does not maintain quotas for inodes, so these columns do not apply to GFS2 file systems and will be blank. If any of the values are set to 0, that limit is not set. In the text editor, change the limits. For example: To verify that the quota for the user has been set, use the following command: You can also set quotas from the command line with the setquota command. For information about the setquota command, see the setquota (8) man page. 4.1.4. Assigning quotas per group Quotas can also be assigned on a per-group basis. Note that if you have mounted your file system in accounting mode (with the quota=account option specified), the quotas are not enforced. To set a group quota for the devel group (the group must exist prior to setting the group quota), use the following command: This command displays the existing quota for the group in the text editor: The GFS2 file system does not maintain quotas for inodes, so these columns do not apply to GFS2 file systems and will be blank. Modify the limits, then save the file. To verify that the group quota has been set, use the following command: 4.2. Managing GFS2 disk quotas If quotas are implemented, they need some maintenance, mostly in the form of watching to see if the quotas are exceeded and making sure the quotas are accurate. If users repeatedly exceed their quotas or consistently reach their soft limits, a system administrator has a few choices to make depending on what type of users they are and how much disk space impacts their work. The administrator can either help the user determine how to use less disk space or increase the user's disk quota. You can create a disk usage report by running the repquota utility. For example, the command repquota /home produces this output: To view the disk usage report for all (option -a ) quota-enabled file systems, use the command: The -- displayed after each user is a quick way to determine whether the block limits have been exceeded. If the block soft limit is exceeded, a + appears in place of the first - in the output. The second - indicates the inode limit, but GFS2 file systems do not support inode limits so that character will remain as - . GFS2 file systems do not support a grace period, so the grace column will remain blank. Note that the repquota command is not supported over NFS, irrespective of the underlying file system. 4.3.
Keeping GFS2 disk quotas accurate with the quotacheck command If you enable quotas on your file system after a period of time when you have been running with quotas disabled, you should run the quotacheck command to create, check, and repair quota files. Additionally, you may want to run the quotacheck command if you think your quota files may not be accurate, as may occur when a file system is not unmounted cleanly after a system crash. For more information about the quotacheck command, see the quotacheck(8) man page. Note Run quotacheck when the file system is relatively idle on all nodes because disk activity may affect the computed quota values. 4.4. Synchronizing quotas with the quotasync command GFS2 stores all quota information in its own internal file on disk. A GFS2 node does not update this quota file for every file system write; rather, by default it updates the quota file once every 60 seconds. This is necessary to avoid contention among nodes writing to the quota file, which would cause a slowdown in performance. As a user or group approaches their quota limit, GFS2 dynamically reduces the time between its quota-file updates to prevent the limit from being exceeded. The normal time period between quota synchronizations is a tunable parameter, quota_quantum . You can change this from its default value of 60 seconds using the quota_quantum= mount option, as described in the "GFS2-Specific Mount Options" table in Mounting a GFS2 file system that specifies mount options . The quota_quantum parameter must be set on each node and each time the file system is mounted. Changes to the quota_quantum parameter are not persistent across unmounts. You can update the quota_quantum value with the mount -o remount command. You can use the quotasync command to synchronize the quota information from a node to the on-disk quota file between the automatic updates performed by GFS2. Usage Synchronizing Quota Information u Sync the user quota files. g Sync the group quota files. a Sync all file systems that are currently quota-enabled and support sync. When -a is absent, a file system mountpoint should be specified. mountpoint Specifies the GFS2 file system to which the actions apply. You can tune the time between synchronizations by specifying the quota_quantum mount option. MountPoint Specifies the GFS2 file system to which the actions apply. secs Specifies the new time period between regular quota-file synchronizations by GFS2. Smaller values may increase contention and slow down performance. The following example synchronizes all the cached dirty quotas from the node it is run on to the on-disk quota file for the file system /mnt/mygfs2 . The following example changes the default time period between regular quota-file updates to one hour (3600 seconds) for file system /mnt/mygfs2 when remounting that file system on logical volume /dev/volgroup/logical_volume .
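As a command-line alternative to the interactive edquota session described above, the limits can also be applied with the setquota command and then pushed to the on-disk quota file with quotasync. The following lines are an illustrative sketch rather than part of the original procedure: they reuse the testuser account and the 500000/550000 block limits from the example above, assume the quota-enabled file system is mounted at /home, and pass 0 for the inode limits because GFS2 does not maintain inode quotas.
# Set the soft and hard block limits for testuser without opening an editor
setquota -u testuser 500000 550000 0 0 /home
# Push the cached quota changes from this node to the on-disk quota file
quotasync -ug /home
# Confirm that the new limits are in place
quota testuser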
[ "pcs resource create gfs2mount Filesystem options=\"quota=on\" device=BLOCKDEVICE directory=MOUNTPOINT fstype=gfs2 clone", "quotacheck -ug /home", "edquota username", "Disk quotas for user testuser (uid 501): Filesystem blocks soft hard inodes soft hard /dev/VolGroup00/LogVol02 440436 0 0", "Disk quotas for user testuser (uid 501): Filesystem blocks soft hard inodes soft hard /dev/VolGroup00/LogVol02 440436 500000 550000", "quota testuser", "edquota -g devel", "Disk quotas for group devel (gid 505): Filesystem blocks soft hard inodes soft hard /dev/VolGroup00/LogVol02 440400 0 0", "quota -g devel", "*** Report for user quotas on device /dev/mapper/VolGroup00-LogVol02 Block grace time: 7days; Inode grace time: 7days Block limits File limits User used soft hard grace used soft hard grace ---------------------------------------------------------------------- root -- 36 0 0 4 0 0 kristin -- 540 0 0 125 0 0 testuser -- 440400 500000 550000 37418 0 0", "repquota -a", "quotasync [-ug] -a| mountpoint", "mount -o quota_quantum= secs ,remount BlockDevice MountPoint", "quotasync -ug /mnt/mygfs2", "mount -o quota_quantum=3600,remount /dev/volgroup/logical_volume /mnt/mygfs2" ]
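The resource-creation command above mounts the file system with quota=on (enforcement mode). For accounting mode, which tracks usage without enforcing limits, the same command can be used with quota=account instead; the variant below is a sketch that simply mirrors the command above, with BLOCKDEVICE and MOUNTPOINT standing in for the actual block device and mount point.
pcs resource create gfs2mount Filesystem options="quota=account" device=BLOCKDEVICE directory=MOUNTPOINT fstype=gfs2 clone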
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/8/html/configuring_gfs2_file_systems/assembly_gfs2-disk-quota-administration-configuring-gfs2-file-systems
Chapter 1. Overview
Chapter 1. Overview AMQ Spring Boot Starter is an adapter for creating Spring-based applications that use AMQ messaging. It provides a Spring Boot starter module that enables you to build standalone Spring applications. The starter uses the AMQ JMS client to communicate using the AMQP 1.0 protocol. AMQ Spring Boot Starter is part of AMQ Clients, a suite of messaging libraries supporting multiple languages and platforms. For an overview of the clients, see AMQ Clients Overview . For information about this release, see AMQ Clients 2.10 Release Notes . AMQ Spring Boot Starter is based on the AMQP 1.0 JMS Spring Boot project. 1.1. Key features Quickly build standalone Spring applications with built-in messaging Automatic configuration of JMS resources Configurable pooling of JMS connections and sessions 1.2. Supported standards and protocols Version 2.2 of the Spring Boot API Version 2.0 of the Java Message Service API Version 1.0 of the Advanced Message Queueing Protocol (AMQP) 1.3. Supported configurations Refer to Red Hat AMQ 7 Supported Configurations on the Red Hat Customer Portal for current information regarding AMQ Spring Boot Starter supported configurations. 1.4. Document conventions The sudo command In this document, sudo is used for any command that requires root privileges. Exercise caution when using sudo because any changes can affect the entire system. For more information about sudo , see Using the sudo command . File paths In this document, all file paths are valid for Linux, UNIX, and similar operating systems (for example, /home/andrea ). On Microsoft Windows, you must use the equivalent Windows paths (for example, C:\Users\andrea ). Variable text This document contains code blocks with variables that you must replace with values specific to your environment. Variable text is enclosed in arrow braces and styled as italic monospace. For example, in the following command, replace <project-dir> with the value for your environment: USD cd <project-dir>
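To make the "automatic configuration of JMS resources" feature concrete, the following minimal sketch shows a Spring component that relies on the connection factory and JmsTemplate that the starter is expected to auto-configure. It is not taken from this guide: the class name, the example queue, and the message text are placeholders, and the snippet assumes the starter is on the classpath and that the broker connection is configured in application.properties.
import org.springframework.jms.annotation.JmsListener;
import org.springframework.jms.core.JmsTemplate;
import org.springframework.stereotype.Component;

// Minimal send/receive component; the "example" queue name is a placeholder.
@Component
public class GreetingClient {

    private final JmsTemplate jmsTemplate; // auto-configured JMS resource

    public GreetingClient(JmsTemplate jmsTemplate) {
        this.jmsTemplate = jmsTemplate;
    }

    // Send a text message to the example queue.
    public void send(String text) {
        jmsTemplate.convertAndSend("example", text);
    }

    // Consume messages from the same queue.
    @JmsListener(destination = "example")
    public void receive(String text) {
        System.out.println("Received: " + text);
    }
}
Using the injected JmsTemplate keeps the application free of broker-specific code, so the same component can be pointed at any AMQP 1.0 broker the starter is configured against.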
[ "cd <project-dir>" ]
https://docs.redhat.com/en/documentation/red_hat_amq/2021.q3/html/using_the_amq_spring_boot_starter/overview
Technical Reference
Technical Reference Red Hat Virtualization 4.3 The technical architecture of Red Hat Virtualization environments Red Hat Virtualization Documentation Team Red Hat Customer Content Services [email protected] Abstract This document describes the concepts, components, and technologies used in a Red Hat Virtualization environment.
null
https://docs.redhat.com/en/documentation/red_hat_virtualization/4.3/html/technical_reference/index
Chapter 18. Configuring JAX-RS Endpoints
Chapter 18. Configuring JAX-RS Endpoints Abstract This chapter explains how to instantiate and configure JAX-RS server endpoints in Blueprint XML and in Spring XML, and also how to instantiate and configure JAX-RS client endpoints (client proxy beans) in XML 18.1. Configuring JAX-RS Server Endpoints 18.1.1. Defining a JAX-RS Server Endpoint Basic server endpoint definition To define a JAX-RS server endpoint in XML, you need to specify at least the following: A jaxrs:server element, which is used to define the endpoint in XML. Note that the jaxrs: namespace prefix maps to different namespaces in Blueprint and in Spring respectively. The base URL of the JAX-RS service, using the address attribute of the jaxrs:server element. Note that there are two different ways of specifying the address URL, which affects how the endpoint gets deployed: As a relative URL -for example, /customers . In this case, the endpoint is deployed into the default HTTP container, and the endpoint's base URL is implicitly obtained by combining the CXF servlet base URL with the specified relative URL. For example, if you deploy a JAX-RS endpoint to the Fuse container, the specified /customers URL would get resolved to the URL, http://Hostname:8181/cxf/customers (assuming that the container is using the default 8181 port). As an absolute URL - for example, http://0.0.0.0:8200/cxf/customers . In this case, a new HTTP listener port is opened for the JAX-RS endpoint (if it is not already open). For example, in the context of Fuse, a new Undertow container would implicitly be created to host the JAX-RS endpoint. The special IP address, 0.0.0.0 , acts as a wildcard, matching any of the hostnames assigned to the current host (which can be useful on multi-homed host machines). One or more JAX-RS root resource classes, which provide the implementation of the JAX-RS service. The simplest way to specify the resource classes is to list them inside a jaxrs:serviceBeans element. Blueprint example The following Blueprint XML example shows how to define a JAX-RS endpoint, which specifies the relative address, /customers (so that it deploys into the default HTTP container) and is implemented by the service.CustomerService resource class: Blueprint XML namespaces To define a JAX-RS endpoint in Blueprint, you typically require at least the following XML namespaces: Prefix Namespace (default) http://www.osgi.org/xmlns/blueprint/v1.0.0 cxf http://cxf.apache.org/blueprint/core jaxrs http://cxf.apache.org/blueprint/jaxrs Spring example The following Spring XML example shows how to define a JAX-RS endpoint, which specifies the relative address, /customers (so that it deploys into the default HTTP container) and is implemented by the service.CustomerService resource class: Spring XML namespaces To define a JAX-RS endpoint in Spring, you typically require at least the following XML namespaces: Prefix Namespace (default) http://www.springframework.org/schema/beans cxf http://cxf.apache.org/core jaxrs http://cxf.apache.org/jaxrs Auto-discovery in Spring XML (Spring only) Instead of specifying the JAX-RS root resource classes explicitly, Spring XML enables you to configure auto-discovery, so that specific Java packages are searched for resource classes (classes annotated by @Path ) and all of the discovered resource classes are automatically attached to the endpoint. In this case, you need to specify just the address attribute and the basePackages attribute in the jaxrs:server element. 
For example, to define a JAX-RS endpoint which uses all of the JAX-RS resource classes under the a.b.c Java package, you can define the endpoint in Spring XML, as follows: The auto-discovery mechanism also discovers and installs into the endpoint any JAX-RS provider classes that it finds under the specified Java packages. Lifecycle management in Spring XML (Spring only) Spring XML enables you to control the lifecycle of beans by setting the scope attribute on a bean element. The following scope values are supported by Spring: singleton (Default) Creates a single bean instance, which is used everywhere and lasts for the entire lifetime of the Spring container. prototype Creates a new bean instance every time the bean is injected into another bean or when a bean is obtained by invoking getBean() on the bean registry. request (Only available in a Web-aware container) Creates a new bean instance for every request invoked on the bean. session (Only available in a Web-aware container) Creates a new bean for the lifetime of a single HTTP session. globalSession (Only available in a Web-aware container) Creates a new bean for the lifetime of a single HTTP session that is shared between portlets. For more details about Spring scopes, please consult the Spring framework documentation on Bean scopes . Note that Spring scopes do not work properly , if you specify JAX-RS resource beans through the jaxrs:serviceBeans element. If you specify the scope attribute on the resource beans in this case, the scope attribute is effectively ignored. In order to make bean scopes work properly within a JAX-RS server endpoint, you require a level of indirection that is provided by a service factory. The simplest way to configure bean scopes is to specify resource beans using the beanNames attribute on the jaxrs:server element, as follows: Where the preceding example configures two resource beans, customerBean1 and customerBean2 . The beanNames attribute is specified as a space-separated list of resource bean IDs. For the ultimate degree of flexibility, you have the option of defining service factory objects explicitly , when you configure the JAX-RS server endpoint, using the jaxrs:serviceFactories element. This more verbose approach has the advantage that you can replace the default service factory implementation with your custom implementation, thus giving you ultimate control over the bean lifecycle. The following example shows how to configure the two resource beans, customerBean1 and customerBean2 , using this approach: Note If you specify a non-singleton lifecycle, it is often a good idea to implement and register a org.apache.cxf.service.Invoker bean (where the instance can be registered by referencing it from a jaxrs:server/jaxrs:invoker element). Attaching a WADL document You can optionally associate a WADL document with the JAX-RS server endpoint using the docLocation attribute on the jaxrs:server element. For example: Schema validation If you have some external XML schemas, for describing message content in JAX-B format, you can associate these external schemas with the JAX-RS server endpoint through the jaxrs:schemaLocations element. 
For example, if you have associated the server endpoint with a WADL document and you also want to enable schema validation on incoming messages, you can specify associated XML schema files as follows: Alternatively, if you want to include all of the schema files, *.xsd , in a given directory, you can just specify the directory name, as follows: Specifying schemas in this way is generally useful for any kind of functionality that requires access to the JAX-B schemas. Specifying the data binding You can use the jaxrs:dataBinding element to specify the data binding that encodes the message body in request and reply messages. For example, to specify the JAX-B data binding, you could configure a JAX-RS endpoint as follows: Or to specify the Aegis data binding, you could configure a JAX-RS endpoint as follows: Using the JMS transport It is possible to configure JAX-RS to use a JMS messaging library as a transport protocol, instead of HTTP. Because JMS itself is not a transport protocol, the actual messaging protocol depends on the particular JMS implementation that you configure. For example, the following Spring XML example shows how to configure a JAX-RS server endpoint to use the JMS transport protocol: Note the following points about the preceding example: JMS implementation -the JMS implementation is provided by the ConnectionFactory bean, which instantiates an Apache ActiveMQ connection factory object. After you instantiate the connection factory, it is automatically installed as the default JMS implementation layer. JMS conduit or destination object -Apache CXF implicitly instantiates a JMS conduit object (to represent a JMS consumer) or a JMS destination object (to represent a JMS provider). This object must be uniquely identified by a QName, which is defined through the attribute settings xmlns:s="http://books.com" (defining the namespace prefix) and serviceName="s:BookService" (defining the QName). Transport ID -to select the JMS transport, the transportId attribute must be set to http://cxf.apache.org/transports/jms . JMS address -the jaxrs:server/@address attribute uses a standardized syntax to specify the JMS queue or JMS topic to send to. For details of this syntax, see https://tools.ietf.org/id/draft-merrick-jms-uri-06.txt . Extension mappings and language mappings A JAX-RS server endpoint can be configured so that it automatically maps a file suffix (appearing in the URL) to a MIME content type header, and maps a language suffix to a language type header. For example, consider an HTTP request of the following form: You can configure the JAX-RS server endpoint to map the .xml suffix automatically, as follows: When the preceding server endpoint receives the HTTP request, it automatically creates a new content type header of type, application/xml , and strips the .xml suffix from the resource URL. For the language mapping, consider an HTTP request of the following form: You can configure the JAX-RS server endpoint to map the .en suffix automatically, as follows: When the preceding server endpoint receives the HTTP request, it automatically creates a new accept language header with the value, en-gb , and strips the .en suffix from the resource URL. 18.1.2. jaxrs:server Attributes Attributes Table 18.1, "JAX-RS Server Endpoint Attributes" describes the attributes available on the jaxrs:server element. Table 18.1. JAX-RS Server Endpoint Attributes Attribute Description id Specifies a unique identifier that other configuration elements can use to refer to the endpoint.
address Specifies the address of an HTTP endpoint. This value will override the value specified in the services contract. basePackages (Spring only) Enables auto-discovery by specifying a comma-separated list of Java packages, which are searched to discover JAX-RS root resource classes and/or JAX-RS provider classes. beanNames Specifies a space-separated list of bean IDs of JAX-RS root resource beans. In the context of Spring XML, it is possible to define a root resource bean's lifecycle by setting the scope attribute on the root resource bean element. bindingId Specifies the ID of the message binding the service uses. A list of valid binding IDs is provided in Chapter 23, Apache CXF Binding IDs . bus Specifies the ID of the Spring bean configuring the bus used to manage the service endpoint. This is useful when configuring several endpoints to use a common set of features. docLocation Specifies the location of an external WADL document. modelRef Specifies a model schema as a classpath resource (for example, a URL of the form classpath:/path/to/model.xml ). For details of how to define a JAX-RS model schema, see Section 18.3, "Defining REST Services with the Model Schema" . publish Specifies if the service should be automatically published. If set to false , the developer must explicitly publish the endpoint. publishedEndpointUrl Specifies the URL base address, which gets inserted into the wadl:resources/@base attribute of the auto-generated WADL interface. serviceAnnotation (Spring only) Specifies the service annotation class name for auto-discovery in Spring. When used in combination with the basePackages property, this option restricts the collection of auto-discovered classes to include only the classes that are annotated by this annotation type. serviceClass Specifies the name of a JAX-RS root resource class (which implements a JAX-RS service). In this case, the class is instantiated by Apache CXF, not by Blueprint or Spring. If you want to instantiate the class in Blueprint or Spring, use the jaxrs:serviceBeans child element instead. serviceName Specifies the service QName (using the format ns : name ) for the JAX-RS endpoint in the special case where a JMS transport is used. For details, see the section called "Using the JMS transport" . staticSubresourceResolution If true , disables dynamic resolution of static sub-resources. Default is false . transportId For selecting a non-standard transport layer (in place of HTTP). In particular, you can select the JMS transport by setting this property to http://cxf.apache.org/transports/jms . For details, see the section called "Using the JMS transport" . abstract (Spring only) Specifies if the bean is an abstract bean. Abstract beans act as parents for concrete bean definitions and are not instantiated. The default is false . Setting this to true instructs the bean factory not to instantiate the bean. depends-on (Spring only) Specifies a list of beans that the endpoint depends on being instantiated before the endpoint can be instantiated. 18.1.3. jaxrs:server Child Elements Child elements Table 18.2, "JAX-RS Server Endpoint Child Elements" describes the child elements of the jaxrs:server element. Table 18.2. JAX-RS Server Endpoint Child Elements Element Description jaxrs:executor Specifies a Java Executor (thread pool implementation) that is used for the service. This is specified using an embedded bean definition. jaxrs:features Specifies a list of beans that configure advanced features of Apache CXF.
You can provide either a list of bean references or a list of embedded beans. jaxrs:binding Not used. jaxrs:dataBinding Specifies the class implementing the data binding used by the endpoint. This is specified using an embedded bean definition. For more details, see the section called "Specifying the data binding" . jaxrs:inInterceptors Specifies a list of interceptors that process inbound requests. For more information see Part VII, "Developing Apache CXF Interceptors" . jaxrs:inFaultInterceptors Specifies a list of interceptors that process inbound fault messages. For more information see Part VII, "Developing Apache CXF Interceptors" . jaxrs:outInterceptors Specifies a list of interceptors that process outbound replies. For more information see Part VII, "Developing Apache CXF Interceptors" . jaxrs:outFaultInterceptors Specifies a list of interceptors that process outbound fault messages. For more information see Part VII, "Developing Apache CXF Interceptors" . jaxrs:invoker Specifies an implementation of the org.apache.cxf.service.Invoker interface used by the service. [a] jaxrs:serviceFactories Provides you with the maximum degree of control over the lifecycle of the JAX-RS root resources associated with this endpoint. The children of this element (which must be instances of org.apache.cxf.jaxrs.lifecycle.ResourceProvider type) are used to create JAX-RS root resource instances. jaxrs:properties Specifies a Spring map of properties that are passed along to the endpoint. These properties can be used to control features like enabling MTOM support. jaxrs:serviceBeans The children of this element are instances of ( bean element) or references to ( ref element) JAX-RS root resources. Note that in this case the scope attribute (Spring only) , if present in the bean element, is ignored. jaxrs:modelBeans Consists of a list of references to one or more org.apache.cxf.jaxrs.model.UserResource beans, which are the basic elements of a resource model (corresponding to jaxrs:resource elements). For details, see Section 18.3, "Defining REST Services with the Model Schema" . jaxrs:model Defines a resource model directly in this endpoint (that is, this jaxrs:model element can contain one or more jaxrs:resource elements). For details, see Section 18.3, "Defining REST Services with the Model Schema" . jaxrs:providers Enables you to register one or more custom JAX-RS providers with this endpoint. The children of this element are instances of ( bean element) or references to ( ref element) JAX-RS providers. jaxrs:extensionMappings When the URL of a REST invocation ends in a file extension, you can use this element to associate it automatically with a particular content type. For example, the .xml file extension could be associated with the application/xml content type. For details, see the section called "Extension mappings and language mappings" . jaxrs:languageMappings When the URL of a REST invocation ends in a language suffix, you can use this element to map this to a particular language. For example, the .en language suffix could be associated with the en-GB language. For details, see the section called "Extension mappings and language mappings" . jaxrs:schemaLocations Specifies one or more XML schemas used for validating XML message content. This element can contain one or more jaxrs:schemaLocation elements, each specifying the location of an XML schema file (usually as a classpath URL). For details, see the section called "Schema validation" . 
jaxrs:resourceComparator Enables you to register a custom resource comparator, which implements the algorithm used to match an incoming URL path to a particular resource class or method. jaxrs:resourceClasses (Blueprint only) Can be used instead of the jaxrs:server/@serviceClass attribute, if you want to create multiple resources from class names. The children of jaxrs:resourceClasses must be class elements with a name attribute set to the name of the resource class. In this case, the classes are instantiated by Apache CXF, not by Blueprint or Spring. [a] The Invoker implementation controls how a service is invoked. For example, it controls whether each request is handled by a new instance of the service implementation or if state is preserved across invocations. 18.2. Configuring JAX-RS Client Endpoints 18.2.1. Defining a JAX-RS Client Endpoint Injecting client proxies The main point of instantiating a client proxy bean in an XML language (Blueprint XML or Spring XML) is in order to inject it into another bean, which can then use the client proxy to invoke the REST service. To create a client proxy bean in XML, use the jaxrs:client element. Namespaces The JAX-RS client endpoint is defined using a different XML namespace from the server endpoint. The following table shows which namespace to use for which XML language: XML Language Namespace for client endpoint Blueprint http://cxf.apache.org/blueprint/jaxrs-client Spring http://cxf.apache.org/jaxrs-client Basic client endpoint definition The following example shows how to create a client proxy bean in Blueprint XML or Spring XML: Where you must set the following attributes to define the basic client endpoint: id The bean ID of the client proxy can be used to inject the client proxy into other beans in your XML configuration. address The address attribute specifies the base URL of the REST invocations. serviceClass The serviceClass attribute provides a description of the REST service by specifying a root resource class (annotated by @Path ). In fact, this is a server class, but it is not used directly by the client. The specified class is used only for its metadata (through Java reflection and JAX-RS annotations), which is used to construct the client proxy dynamically. Specifying headers You can add HTTP headers to the client proxy's invocations using the jaxrs:headers child elements, as follows: 18.2.2. jaxrs:client Attributes Attributes Table 18.3, "JAX-RS Client Endpoint Attributes" describes the attributes available on the jaxrs:client element. Table 18.3. JAX-RS Client Endpoint Attributes Attribute Description address Specifies the HTTP address of the endpoint where the consumer will make requests. This value overrides the value set in the contract. bindingId Specifies the ID of the message binding the consumer uses. A list of valid binding IDs is provided in Chapter 23, Apache CXF Binding IDs . bus Specifies the ID of the Spring bean configuring the bus managing the endpoint. inheritHeaders Specifies whether the headers set for this proxy will be inherited, if a subresource proxy is created from this proxy. Default is false . username Specifies the username used for simple username/password authentication. password Specifies the password used for simple username/password authentication. modelRef Specifies a model schema as a classpath resource (for example, a URL of the form classpath:/path/to/model.xml ). For details of how to define a JAX-RS model schema, see Section 18.3, "Defining REST Services with the Model Schema" . 
serviceClass Specifies the name of a service interface or a resource class (that is annotated with @PATH ), re-using it from the JAX-RS server implementation. In this case, the specified class is not invoked directly (it is actually a server class). The specified class is used only for its metadata (through Java reflection and JAX-RS annotations), which is used to construct the client proxy dynamically. serviceName Specifies the service QName (using the format ns : name ) for the JAX-RS endpoint in the special case where a JMS transport is used. For details, see the section called "Using the JMS transport" . threadSafe Specifies whether or not the client proxy is thread-safe. Default is false . transportId For selecting a non-standard transport layer (in place of HTTP). In particular, you can select the JMS transport by setting this property to http://cxf.apache.org/transports/jms . For details, see the section called "Using the JMS transport" . abstract (Spring only) Specifies if the bean is an abstract bean. Abstract beans act as parents for concrete bean definitions and are not instantiated. The default is false . Setting this to true instructs the bean factory not to instantiate the bean. depends-on (Spring only) Specifies a list of beans that the endpoint depends on being instantiated before it can be instantiated. 18.2.3. jaxrs:client Child Elements Child elements Table 18.4, "JAX-RS Client Endpoint Child Elements" describes the child elements of the jaxrs:client element. Table 18.4. JAX-RS Client Endpoint Child Elements Element Description jaxrs:executor jaxrs:features Specifies a list of beans that configure advanced features of Apache CXF. You can provide either a list of bean references or a list of embedded beans. jaxrs:binding Not used. jaxrs:dataBinding Specifies the class implementing the data binding used by the endpoint. This is specified using an embedded bean definition. For more details, see the section called "Specifying the data binding" . jaxrs:inInterceptors Specifies a list of interceptors that process inbound responses. For more information see Part VII, "Developing Apache CXF Interceptors" . jaxrs:inFaultInterceptors Specifies a list of interceptors that process inbound fault messages. For more information see Part VII, "Developing Apache CXF Interceptors" . jaxrs:outInterceptors Specifies a list of interceptors that process outbound requests. For more information see Part VII, "Developing Apache CXF Interceptors" . jaxrs:outFaultInterceptors Specifies a list of interceptors that process outbound fault messages. For more information see Part VII, "Developing Apache CXF Interceptors" . jaxrs:properties Specifies a map of properties that are passed to the endpoint. jaxrs:providers Enables you to register one or more custom JAX-RS providers with this endpoint. The children of this element are instances of ( bean element) or references to ( ref element) JAX-RS providers. jaxrs:modelBeans Consists of a list of references to one or more org.apache.cxf.jaxrs.model.UserResource beans, which are the basic elements of a resource model (corresponding to jaxrs:resource elements). For details, see Section 18.3, "Defining REST Services with the Model Schema" . jaxrs:model Defines a resource model directly in this endpoint (that is, a jaxrs:model element containing one or more jaxrs:resource elements). For details, see Section 18.3, "Defining REST Services with the Model Schema" . jaxrs:headers Used for setting headers on the outgoing message. 
For details, see the section called "Specifying headers" . jaxrs:schemaLocations Specifies one or more XML schemas used for validating XML message content. This element can contain one or more jaxrs:schemaLocation elements, each specifying the location of an XML schema file (usually as a classpath URL). For details, see the section called "Schema validation" . 18.3. Defining REST Services with the Model Schema RESTful services without annotations The JAX-RS model schema makes it possible to define RESTful services without annotating Java classes. That is, instead of adding annotations like @Path , @PathParam , @Consumes , @Produces , and so on, directly to a Java class (or interface), you can provide all of the relevant REST metadata in a separate XML file, using the model schema. This can be useful, for example, in cases where you are unable to modify the Java source that implements the service. Example model schema Example 18.1, "Sample JAX-RS Model Schema" shows an example of a model schema that defines service metadata for the BookStoreNoAnnotations root resource class. Example 18.1. Sample JAX-RS Model Schema Namespaces The XML namespace that you use to define a model schema depends on whether you are defining the corresponding JAX-RS endpoint in Blueprint XML or in Spring XML. The following table shows which namespace to use for which XML language: XML Language Namespace Blueprint http://cxf.apache.org/blueprint/jaxrs Spring http://cxf.apache.org/jaxrs How to attach a model schema to an endpoint To define and attach a model schema to an endpoint, perform the following steps: Define the model schema, using the appropriate XML namespace for your chosen injection platform (Blueprint XML or Spring XML). Add the model schema file to your project's resources, so that the schema file is available on the classpath in the final package (JAR, WAR, or OSGi bundle file). Note Alternatively, it is also possible to embed a model schema directly into a JAX-RS endpoint, using the endpoint's jaxrs:model child element. Configure the endpoint to use the model schema, by setting the endpoint's modelRef attribute to the location of the model schema on the classpath (using a classpath URL). If necessary, instantiate the root resources explicitly, using the jaxrs:serviceBeans element. You can skip this step, if the model schema references root resource classes directly (instead of referencing base interfaces). Configuration of model schema referencing a class If the model schema applies directly to root resource classes, there is no need to define any root resource beans using the jaxrs:serviceBeans element, because the model schema automatically instantiates the root resource beans. For example, given that customer-resources.xml is a model schema that associates metadata with customer resource classes, you could instantiate a customerService service endpoint as follows: Configuration of model schema referencing an interface If the model schema applies to Java interfaces (which are the base interfaces of the root resources), you must instantiate the root resource classes using the jaxrs:serviceBeans element in the endpoint. For example, given that customer-interfaces.xml is a model schema that associates metadata with customer interfaces, you could instantiate a customerService service endpoint as follows: Model Schema Reference A model schema is defined using the following XML elements: model Root element of the model schema. 
If you need to reference the model schema (for example, from a JAX-RS endpoint using the modelRef attribute), you should set the id attribute on this element. model/resource The resource element is used to associate metadata with a specific root resource class (or with a corresponding interface). You can define the following attributes on the resource element: Attribute Description name The name of the resource class (or corresponding interface) to which this resource model is applied. path The component of the REST URL path that maps to this resource. consumes Specifies the content type (Internet media type) consumed by this resource, for example, application/xml or application/json . produces Specifies the content type (Internet media type) produced by this resource, for example, application/xml or application/json . model/resource/operation The operation element is used to associate metadata with Java methods. You can define the following attributes on an operation element: Attribute Description name The name of the Java method to which this element is applied. path The component of the REST URL path that maps to this method. This attribute value can include parameter references, for example: path="/books/{id}/chapter" , where {id} extracts the value of the id parameter from the path. verb Specifies the HTTP verb that maps to this method. Typically one of: GET , POST , PUT , or DELETE . If the HTTP verb is not specified, it is assumed that the Java method is a sub-resource locator , which returns a reference to a sub-resource object (where the sub-resource class must also be provided with metadata using a resource element). consumes Specifies the content type (Internet media type) consumed by this operation, for example, application/xml or application/json . produces Specifies the content type (Internet media type) produced by this operation, for example, application/xml or application/json . oneway If true , configures the operation to be oneway , meaning that no reply message is needed. Defaults to false . model/resource/operation/param The param element is used to extract a value from the REST URL and inject it into one of the method parameters. You can define the following attributes on a param element: Attribute Description name The name of the Java method parameter to which this element is applied. type Specifies how the parameter value is extracted from the REST URL or message. It can be set to one of the following values: PATH , QUERY , MATRIX , HEADER , COOKIE , FORM , CONTEXT , REQUEST_BODY . defaultValue Default value to inject into the parameter, in case a value could not be extracted from the REST URL or message. encoded If true , the parameter value is injected in its URI encoded form (that is, using %nn encoding). Default is false . For example, when extracting a parameter from the URL path /name/Joe%20Bloggs with encoded set to true , the parameter is injected as Joe%20Bloggs ; otherwise, the parameter would be injected as Joe Bloggs .
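For readers who prefer to keep the resource model next to the endpoint definition, the following Spring XML sketch illustrates the embedded alternative mentioned above, where the model is placed inside the endpoint through the jaxrs:model child element instead of being referenced with modelRef. The resource and operation shown are copied from the sample model schema; treat the exact nesting as illustrative rather than a definitive reference, and check it against the CXF schema for your release.

<jaxrs:server id="customerService" address="/customers">
  <!-- Embedded resource model: same content as the standalone model schema file -->
  <jaxrs:model>
    <jaxrs:resource name="org.apache.cxf.systest.jaxrs.BookStoreNoAnnotations" path="bookstore"
                    produces="application/json" consumes="application/json">
      <jaxrs:operation name="getBook" verb="GET" path="/books/{id}" produces="application/xml">
        <jaxrs:param name="id" type="PATH"/>
      </jaxrs:operation>
    </jaxrs:resource>
  </jaxrs:model>
</jaxrs:server>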
[ "<blueprint xmlns=\"http://www.osgi.org/xmlns/blueprint/v1.0.0\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns:jaxrs=\"http://cxf.apache.org/blueprint/jaxrs\" xmlns:cxf=\"http://cxf.apache.org/blueprint/core\" xsi:schemaLocation=\" http://www.osgi.org/xmlns/blueprint/v1.0.0 https://www.osgi.org/xmlns/blueprint/v1.0.0/blueprint.xsd http://cxf.apache.org/blueprint/jaxrs http://cxf.apache.org/schemas/blueprint/jaxrs.xsd http://cxf.apache.org/blueprint/core http://cxf.apache.org/schemas/blueprint/core.xsd \"> <cxf:bus> <cxf:features> <cxf:logging/> </cxf:features> </cxf:bus> <jaxrs:server id=\"customerService\" address=\"/customers\"> <jaxrs:serviceBeans> <ref component-id=\"serviceBean\" /> </jaxrs:serviceBeans> </jaxrs:server> <bean id=\"serviceBean\" class=\"service.CustomerService\"/> </blueprint>", "<beans xmlns=\"http://www.springframework.org/schema/beans\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns:jaxrs=\"http://cxf.apache.org/jaxrs\" xsi:schemaLocation=\" http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-2.0.xsd http://cxf.apache.org/jaxrs http://cxf.apache.org/schemas/jaxrs.xsd\"> <jaxrs:server id=\"customerService\" address=\"/customers\"> <jaxrs:serviceBeans> <ref bean=\"serviceBean\"/> </jaxrs:serviceBeans> </jaxrs:server> <bean id=\"serviceBean\" class=\"service.CustomerService\"/> </beans>", "<jaxrs:server address=\"/customers\" basePackages=\"a.b.c\"/>", "<beans ... > <jaxrs:server id=\"customerService\" address=\"/service1\" beanNames=\"customerBean1 customerBean2\"/> <bean id=\"customerBean1\" class=\"demo.jaxrs.server.CustomerRootResource1\" scope=\"prototype\"/> <bean id=\"customerBean2\" class=\"demo.jaxrs.server.CustomerRootResource2\" scope=\"prototype\"/> </beans>", "<beans ... 
> <jaxrs:server id=\"customerService\" address=\"/service1\"> <jaxrs:serviceFactories> <ref bean=\"sfactory1\" /> <ref bean=\"sfactory2\" /> </jaxrs:serviceFactories> </jaxrs:server> <bean id=\"sfactory1\" class=\"org.apache.cxf.jaxrs.spring.SpringResourceFactory\"> <property name=\"beanId\" value=\"customerBean1\"/> </bean> <bean id=\"sfactory2\" class=\"org.apache.cxf.jaxrs.spring.SpringResourceFactory\"> <property name=\"beanId\" value=\"customerBean2\"/> </bean> <bean id=\"customerBean1\" class=\"demo.jaxrs.server.CustomerRootResource1\" scope=\"prototype\"/> <bean id=\"customerBean2\" class=\"demo.jaxrs.server.CustomerRootResource2\" scope=\"prototype\"/> </beans>", "<jaxrs:server address=\"/rest\" docLocation=\"wadl/bookStore.wadl\"> <jaxrs:serviceBeans> <bean class=\"org.bar.generated.BookStore\"/> </jaxrs:serviceBeans> </jaxrs:server>", "<jaxrs:server address=\"/rest\" docLocation=\"wadl/bookStore.wadl\"> <jaxrs:serviceBeans> <bean class=\"org.bar.generated.BookStore\"/> </jaxrs:serviceBeans> <jaxrs:schemaLocations> <jaxrs:schemaLocation>classpath:/schemas/a.xsd</jaxrs:schemaLocation> <jaxrs:schemaLocation>classpath:/schemas/b.xsd</jaxrs:schemaLocation> </jaxrs:schemaLocations> </jaxrs:server>", "<jaxrs:server address=\"/rest\" docLocation=\"wadl/bookStore.wadl\"> <jaxrs:serviceBeans> <bean class=\"org.bar.generated.BookStore\"/> </jaxrs:serviceBeans> <jaxrs:schemaLocations> <jaxrs:schemaLocation>classpath:/schemas/</jaxrs:schemaLocation> </jaxrs:schemaLocations> </jaxrs:server>", "<jaxrs:server id=\"jaxbbook\" address=\"/jaxb\"> <jaxrs:serviceBeans> <ref bean=\"serviceBean\" /> </jaxrs:serviceBeans> <jaxrs:dataBinding> <bean class=\"org.apache.cxf.jaxb.JAXBDataBinding\"/> </jaxrs:dataBinding> </jaxrs:server>>", "<jaxrs:server id=\"aegisbook\" address=\"/aegis\"> <jaxrs:serviceBeans> <ref bean=\"serviceBean\" /> </jaxrs:serviceBeans> <jaxrs:dataBinding> <bean class=\"org.apache.cxf.aegis.databinding.AegisDatabinding\"> <property name=\"aegisContext\"> <bean class=\"org.apache.cxf.aegis.AegisContext\"> <property name=\"writeXsiTypes\" value=\"true\"/> </bean> </property> </bean> </jaxrs:dataBinding> </jaxrs:server>", "<?xml version=\"1.0\" encoding=\"UTF-8\"?> <beans xmlns=\"http://www.springframework.org/schema/beans\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns:jms=\"http://cxf.apache.org/transports/jms\" xmlns:jaxrs=\"http://cxf.apache.org/jaxrs\" xsi:schemaLocation=\" http://cxf.apache.org/transports/jms http://cxf.apache.org/schemas/configuration/jms.xsd http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans.xsd http://cxf.apache.org/jaxrs http://cxf.apache.org/schemas/jaxrs.xsd\"> <bean class=\"org.springframework.beans.factory.config.PropertyPlaceholderConfigurer\"/> <bean id=\"ConnectionFactory\" class=\"org.apache.activemq.ActiveMQConnectionFactory\"> <property name=\"brokerURL\" value=\"tcp://localhost:USD{testutil.ports.EmbeddedJMSBrokerLauncher}\" /> </bean> <jaxrs:server xmlns:s=\"http://books.com\" serviceName=\"s:BookService\" transportId= \"http://cxf.apache.org/transports/jms\" address=\"jms:queue:test.jmstransport.text?replyToName=test.jmstransport.response\"> <jaxrs:serviceBeans> <bean class=\"org.apache.cxf.systest.jaxrs.JMSBookStore\"/> </jaxrs:serviceBeans> </jaxrs:server> </beans>", "GET /resource.xml", "<jaxrs:server id=\"customerService\" address=\"/\"> <jaxrs:serviceBeans> <bean class=\"org.apache.cxf.jaxrs.systests.CustomerService\" /> </jaxrs:serviceBeans> <jaxrs:extensionMappings> <entry 
key=\"json\" value=\"application/json\"/> <entry key=\"xml\" value=\"application/xml\"/> </jaxrs:extensionMappings> </jaxrs:server>", "GET /resource.en", "<jaxrs:server id=\"customerService\" address=\"/\"> <jaxrs:serviceBeans> <bean class=\"org.apache.cxf.jaxrs.systests.CustomerService\" /> </jaxrs:serviceBeans> <jaxrs:languageMappings> <entry key=\"en\" value=\"en-gb\"/> </jaxrs:languageMappings> </jaxrs:server>", "<jaxrs:client id=\"restClient\" address=\"http://localhost:8080/test/services/rest\" serviceClass=\"org.apache.cxf.systest.jaxrs.BookStoreJaxrsJaxws\"/>", "<jaxrs:client id=\"restClient\" address=\"http://localhost:8080/test/services/rest\" serviceClass=\"org.apache.cxf.systest.jaxrs.BookStoreJaxrsJaxws\" inheritHeaders=\"true\"> <jaxrs:headers> <entry key=\"Accept\" value=\"text/xml\"/> </jaxrs:headers> </jaxrs:client>", "<model xmlns=\"http://cxf.apache.org/jaxrs\"> <resource name=\"org.apache.cxf.systest.jaxrs.BookStoreNoAnnotations\" path=\"bookstore\" produces=\"application/json\" consumes=\"application/json\"> <operation name=\"getBook\" verb=\"GET\" path=\"/books/{id}\" produces=\"application/xml\"> <param name=\"id\" type=\"PATH\"/> </operation> <operation name=\"getBookChapter\" path=\"/books/{id}/chapter\"> <param name=\"id\" type=\"PATH\"/> </operation> <operation name=\"updateBook\" verb=\"PUT\"> <param name=\"book\" type=\"REQUEST_BODY\"/> </operation> </resource> <resource name=\"org.apache.cxf.systest.jaxrs.ChapterNoAnnotations\"> <operation name=\"getItself\" verb=\"GET\"/> <operation name=\"updateChapter\" verb=\"PUT\" consumes=\"application/xml\"> <param name=\"content\" type=\"REQUEST_BODY\"/> </operation> </resource> </model>", "<jaxrs:server id=\"customerService\" address=\"/customers\" modelRef=\"classpath:/org/example/schemas/customer-resources.xml\" />", "<jaxrs:server id=\"customerService\" address=\"/customers\" modelRef=\"classpath:/org/example/schemas/customer-interfaces.xml\"> <jaxrs:serviceBeans> <ref component-id=\"serviceBean\" /> </jaxrs:serviceBeans> </jaxrs:server> <bean id=\"serviceBean\" class=\"service.CustomerService\"/>" ]
https://docs.redhat.com/en/documentation/red_hat_fuse/7.13/html/apache_cxf_development_guide/JAXRSEndpointConfig
Chapter 1. Preparing to install on IBM Power Virtual Server
Chapter 1. Preparing to install on IBM Power Virtual Server The installation workflows documented in this section are for IBM Power(R) Virtual Server infrastructure environments. 1.1. Prerequisites You reviewed details about the OpenShift Container Platform installation and update processes. You read the documentation on selecting a cluster installation method and preparing it for users . Important IBM Power(R) Virtual Server using installer-provisioned infrastructure is a Technology Preview feature only. Technology Preview features are not supported with Red Hat production service level agreements (SLAs) and might not be functionally complete. Red Hat does not recommend using them in production. These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process. For more information about the support scope of Red Hat Technology Preview features, see Technology Preview Features Support Scope . 1.2. Requirements for installing OpenShift Container Platform on IBM Power Virtual Server Before installing OpenShift Container Platform on IBM Power(R) Virtual Server you must create a service account and configure an IBM Cloud(R) account. See Configuring an IBM Cloud(R) account for details about creating an account, configuring DNS and supported IBM Power(R) Virtual Server regions. You must manually manage your cloud credentials when installing a cluster to IBM Power(R) Virtual Server. Do this by configuring the Cloud Credential Operator (CCO) for manual mode before you install the cluster. 1.3. Choosing a method to install OpenShift Container Platform on IBM Power Virtual Server You can install OpenShift Container Platform on IBM Power(R) Virtual Server using installer-provisioned infrastructure. This process involves using an installation program to provision the underlying infrastructure for your cluster. Installing OpenShift Container Platform on IBM Power(R) Virtual Server using user-provisioned infrastructure is not supported at this time. See Installation process for more information about installer-provisioned installation processes. 1.3.1. Installing a cluster on installer-provisioned infrastructure You can install a cluster on IBM Power(R) Virtual Server infrastructure that is provisioned by the OpenShift Container Platform installation program by using one of the following methods: Installing a customized cluster on IBM Power(R) Virtual Server : You can install a customized cluster on IBM Power(R) Virtual Server infrastructure that the installation program provisions. The installation program allows for some customization to be applied at the installation stage. Many other customization options are available post-installation . Installing a cluster on IBM Power(R) Virtual Server into an existing VPC : You can install OpenShift Container Platform on IBM Power(R) Virtual Server into an existing Virtual Private Cloud (VPC). You can use this installation method if you have constraints set by the guidelines of your company, such as limits when creating new accounts or infrastructure. Installing a private cluster on IBM Power(R) Virtual Server : You can install a private cluster on IBM Power(R) Virtual Server. You can use this method to deploy OpenShift Container Platform on an internal network that is not visible to the internet. 
Installing a cluster on IBM Power(R) Virtual Server in a restricted network : You can install OpenShift Container Platform on IBM Power(R) Virtual Server on installer-provisioned infrastructure by using an internal mirror of the installation release content. You can use this method to install a cluster that does not require an active internet connection to obtain the software components. 1.4. Configuring the Cloud Credential Operator utility The Cloud Credential Operator (CCO) manages cloud provider credentials as Kubernetes custom resource definitions (CRDs). To install a cluster on IBM Power(R) Virtual Server, you must set the CCO to manual mode as part of the installation process. To create and manage cloud credentials from outside of the cluster when the Cloud Credential Operator (CCO) is operating in manual mode, extract and prepare the CCO utility ( ccoctl ) binary. Note The ccoctl utility is a Linux binary that must run in a Linux environment. Prerequisites You have access to an OpenShift Container Platform account with cluster administrator access. You have installed the OpenShift CLI ( oc ). Procedure Set a variable for the OpenShift Container Platform release image by running the following command: USD RELEASE_IMAGE=USD(./openshift-install version | awk '/release image/ {print USD3}') Obtain the CCO container image from the OpenShift Container Platform release image by running the following command: USD CCO_IMAGE=USD(oc adm release info --image-for='cloud-credential-operator' USDRELEASE_IMAGE -a ~/.pull-secret) Note Ensure that the architecture of the USDRELEASE_IMAGE matches the architecture of the environment in which you will use the ccoctl tool. Extract the ccoctl binary from the CCO container image within the OpenShift Container Platform release image by running the following command: USD oc image extract USDCCO_IMAGE --file="/usr/bin/ccoctl" -a ~/.pull-secret Change the permissions to make ccoctl executable by running the following command: USD chmod 775 ccoctl Verification To verify that ccoctl is ready to use, display the help file. Use a relative file name when you run the command, for example: USD ./ccoctl.rhel9 Example output OpenShift credentials provisioning tool Usage: ccoctl [command] Available Commands: alibabacloud Manage credentials objects for alibaba cloud aws Manage credentials objects for AWS cloud azure Manage credentials objects for Azure gcp Manage credentials objects for Google cloud help Help about any command ibmcloud Manage credentials objects for IBM Cloud nutanix Manage credentials objects for Nutanix Flags: -h, --help help for ccoctl Use "ccoctl [command] --help" for more information about a command. Additional resources Rotating API keys 1.5. steps Configuring an IBM Cloud(R) account
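Because the IBM Power(R) Virtual Server installation relies on manually created IBM Cloud credentials, a common follow-up to extracting ccoctl is to inspect its IBM Cloud subcommands before running them. The help invocation below uses only the ibmcloud command that appears in the help output above; the commented create-service-id line is an illustrative sketch, and its flag names should be verified with --help for your release.

# List the IBM Cloud credential subcommands provided by this ccoctl build
./ccoctl ibmcloud --help

# Illustrative next step (verify the exact flags for your release before running):
# ./ccoctl ibmcloud create-service-id --credentials-requests-dir ./credrequests --name <cluster_name>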
[ "RELEASE_IMAGE=USD(./openshift-install version | awk '/release image/ {print USD3}')", "CCO_IMAGE=USD(oc adm release info --image-for='cloud-credential-operator' USDRELEASE_IMAGE -a ~/.pull-secret)", "oc image extract USDCCO_IMAGE --file=\"/usr/bin/ccoctl\" -a ~/.pull-secret", "chmod 775 ccoctl", "./ccoctl.rhel9", "OpenShift credentials provisioning tool Usage: ccoctl [command] Available Commands: alibabacloud Manage credentials objects for alibaba cloud aws Manage credentials objects for AWS cloud azure Manage credentials objects for Azure gcp Manage credentials objects for Google cloud help Help about any command ibmcloud Manage credentials objects for IBM Cloud nutanix Manage credentials objects for Nutanix Flags: -h, --help help for ccoctl Use \"ccoctl [command] --help\" for more information about a command." ]
https://docs.redhat.com/en/documentation/openshift_container_platform/4.14/html/installing_on_ibm_power_virtual_server/preparing-to-install-on-ibm-power-vs
Chapter 4. Upgrading Red Hat build of Keycloak adapters
Chapter 4. Upgrading Red Hat build of Keycloak adapters After you upgrade the Red Hat build of Keycloak server, you can upgrade the adapters. Earlier versions of the adapter might work with later versions of the Red Hat build of Keycloak server, but earlier versions of the Red Hat build of Keycloak server might not work with later versions of the adapter. 4.1. Compatibility with older adapters Newer versions of the Red Hat build of Keycloak server potentially work with older versions of the adapters. However, some fixes of the Red Hat build of Keycloak server may break compatibility with older versions of the adapters. For example, a new implementation of the OpenID Connect specification may not match older client adapter versions. For this situation, you can use Compatibility modes. For OpenID Connect clients, the Admin Console includes OpenID Connect Compatibility Modes on the page with client details. With this option, you can disable some new aspects of the Red Hat build of Keycloak server to preserve compatibility with older client adapters. For more details, see the tool tips of individual switches. 4.2. Upgrading the EAP adapter To upgrade the JBoss EAP adapter, complete the following steps: Procedure Download the new adapter archive. Remove the adapter modules by deleting the EAP_HOME/modules/system/add-ons/keycloak/ directory. Unzip the downloaded archive into EAP_HOME . 4.3. Upgrading the JavaScript adapter To upgrade a JavaScript adapter, install the latest version from NPM . Procedure npm install keycloak-js@latest 4.4. Upgrading the Node.js adapter To upgrade a Node.js adapter that has been copied to your web application, perform the following procedure. Procedure Download the new adapter archive. Remove the existing Node.js adapter directory Unzip the updated file into its place Change the dependency for keycloak-connect in the package.json of your application
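If your application pulls the Node.js adapter from the npm registry rather than from a downloaded archive, updating the keycloak-connect dependency usually comes down to reinstalling the package at the new version, which also rewrites the entry in package.json. The commands below are a minimal sketch; the pinned version shown in the comment is a placeholder that you should replace with the release you are upgrading to.

# From the root of your Node.js application
npm install keycloak-connect@latest

# Or pin an explicit version in package.json before reinstalling, for example:
#   "dependencies": { "keycloak-connect": "26.0.0" }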
null
https://docs.redhat.com/en/documentation/red_hat_build_of_keycloak/26.0/html/upgrading_guide/upgrading_red_hat_build_of_keycloak_adapters
Making open source more inclusive
Making open source more inclusive Red Hat is committed to replacing problematic language in our code, documentation, and web properties. We are beginning with these four terms: master, slave, blacklist, and whitelist. Because of the enormity of this endeavor, these changes will be implemented gradually over several upcoming releases. For more details, see our CTO Chris Wright's message .
null
https://docs.redhat.com/en/documentation/red_hat_openstack_platform/16.2/html/spine_leaf_networking/making-open-source-more-inclusive
Chapter 9. Installing a cluster on AWS into a government region
Chapter 9. Installing a cluster on AWS into a government region In OpenShift Container Platform version 4.14, you can install a cluster on Amazon Web Services (AWS) into a government region. To configure the region, modify parameters in the install-config.yaml file before you install the cluster. 9.1. Prerequisites You reviewed details about the OpenShift Container Platform installation and update processes. You read the documentation on selecting a cluster installation method and preparing it for users . You configured an AWS account to host the cluster. Important If you have an AWS profile stored on your computer, it must not use a temporary session token that you generated while using a multi-factor authentication device. The cluster continues to use your current AWS credentials to create AWS resources for the entire life of the cluster, so you must use long-term credentials. To generate appropriate keys, see Managing Access Keys for IAM Users in the AWS documentation. You can supply the keys when you run the installation program. If you use a firewall, you configured it to allow the sites that your cluster requires access to. 9.2. AWS government regions OpenShift Container Platform supports deploying a cluster to an AWS GovCloud (US) region. The following AWS GovCloud partitions are supported: us-gov-east-1 us-gov-west-1 9.3. Installation requirements Before you can install the cluster, you must: Provide an existing private AWS VPC and subnets to host the cluster. Public zones are not supported in Route 53 in AWS GovCloud. As a result, clusters must be private when you deploy to an AWS government region. Manually create the installation configuration file ( install-config.yaml ). 9.4. Private clusters You can deploy a private OpenShift Container Platform cluster that does not expose external endpoints. Private clusters are accessible from only an internal network and are not visible to the internet. Note Public zones are not supported in Route 53 in an AWS GovCloud Region. Therefore, clusters must be private if they are deployed to an AWS GovCloud Region. By default, OpenShift Container Platform is provisioned to use publicly-accessible DNS and endpoints. A private cluster sets the DNS, Ingress Controller, and API server to private when you deploy your cluster. This means that the cluster resources are only accessible from your internal network and are not visible to the internet. Important If the cluster has any public subnets, load balancer services created by administrators might be publicly accessible. To ensure cluster security, verify that these services are explicitly annotated as private. To deploy a private cluster, you must: Use existing networking that meets your requirements. Your cluster resources might be shared between other clusters on the network. Deploy from a machine that has access to: The API services for the cloud to which you provision. The hosts on the network that you provision. The internet to obtain installation media. You can use any machine that meets these access requirements and follows your company's guidelines. For example, this machine can be a bastion host on your cloud network or a machine that has access to the network through a VPN. 9.4.1. Private clusters in AWS To create a private cluster on Amazon Web Services (AWS), you must provide an existing private VPC and subnets to host the cluster. The installation program must also be able to resolve the DNS records that the cluster requires. 
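To make the shape of such a private, GovCloud deployment concrete, the following install-config.yaml fragment is a minimal sketch: the base domain, cluster name, and subnet IDs are placeholders, and the fully annotated sample file appears later in this chapter under "Sample customized install-config.yaml file for AWS".

apiVersion: v1
baseDomain: example.com
metadata:
  name: govcloud-cluster
platform:
  aws:
    region: us-gov-west-1
    subnets:
    - subnet-0123456789abcdef0
    - subnet-0fedcba9876543210
publish: Internal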
The installation program configures the Ingress Operator and API server for access from only the private network. The cluster still requires access to internet to access the AWS APIs. The following items are not required or created when you install a private cluster: Public subnets Public load balancers, which support public ingress A public Route 53 zone that matches the baseDomain for the cluster The installation program does use the baseDomain that you specify to create a private Route 53 zone and the required records for the cluster. The cluster is configured so that the Operators do not create public records for the cluster and all cluster machines are placed in the private subnets that you specify. 9.4.1.1. Limitations The ability to add public functionality to a private cluster is limited. You cannot make the Kubernetes API endpoints public after installation without taking additional actions, including creating public subnets in the VPC for each availability zone in use, creating a public load balancer, and configuring the control plane security groups to allow traffic from the internet on 6443 (Kubernetes API port). If you use a public Service type load balancer, you must tag a public subnet in each availability zone with kubernetes.io/cluster/<cluster-infra-id>: shared so that AWS can use them to create public load balancers. 9.5. About using a custom VPC In OpenShift Container Platform 4.14, you can deploy a cluster into existing subnets in an existing Amazon Virtual Private Cloud (VPC) in Amazon Web Services (AWS). By deploying OpenShift Container Platform into an existing AWS VPC, you might be able to avoid limit constraints in new accounts or more easily abide by the operational constraints that your company's guidelines set. If you cannot obtain the infrastructure creation permissions that are required to create the VPC yourself, use this installation option. Because the installation program cannot know what other components are also in your existing subnets, it cannot choose subnet CIDRs and so forth on your behalf. You must configure networking for the subnets that you install your cluster to yourself. 9.5.1. Requirements for using your VPC The installation program no longer creates the following components: Internet gateways NAT gateways Subnets Route tables VPCs VPC DHCP options VPC endpoints Note The installation program requires that you use the cloud-provided DNS server. Using a custom DNS server is not supported and causes the installation to fail. If you use a custom VPC, you must correctly configure it and its subnets for the installation program and the cluster to use. See Amazon VPC console wizard configurations and Work with VPCs and subnets in the AWS documentation for more information on creating and managing an AWS VPC. The installation program cannot: Subdivide network ranges for the cluster to use. Set route tables for the subnets. Set VPC options like DHCP. You must complete these tasks before you install the cluster. See VPC networking components and Route tables for your VPC for more information on configuring networking in an AWS VPC. Your VPC must meet the following characteristics: The VPC must not use the kubernetes.io/cluster/.*: owned , Name , and openshift.io/cluster tags. The installation program modifies your subnets to add the kubernetes.io/cluster/.*: shared tag, so your subnets must have at least one free tag slot available for it. 
See Tag Restrictions in the AWS documentation to confirm that the installation program can add a tag to each subnet that you specify. You cannot use a Name tag, because it overlaps with the EC2 Name field and the installation fails. You must enable the enableDnsSupport and enableDnsHostnames attributes in your VPC, so that the cluster can use the Route 53 zones that are attached to the VPC to resolve cluster's internal DNS records. See DNS Support in Your VPC in the AWS documentation. If you prefer to use your own Route 53 hosted private zone, you must associate the existing hosted zone with your VPC prior to installing a cluster. You can define your hosted zone using the platform.aws.hostedZone and platform.aws.hostedZoneRole fields in the install-config.yaml file. You can use a private hosted zone from another account by sharing it with the account where you install the cluster. If you use a private hosted zone from another account, you must use the Passthrough or Manual credentials mode. If you are working in a disconnected environment, you are unable to reach the public IP addresses for EC2, ELB, and S3 endpoints. Depending on the level to which you want to restrict internet traffic during the installation, the following configuration options are available: Option 1: Create VPC endpoints Create a VPC endpoint and attach it to the subnets that the clusters are using. Name the endpoints as follows: ec2.<aws_region>.amazonaws.com elasticloadbalancing.<aws_region>.amazonaws.com s3.<aws_region>.amazonaws.com With this option, network traffic remains private between your VPC and the required AWS services. Option 2: Create a proxy without VPC endpoints As part of the installation process, you can configure an HTTP or HTTPS proxy. With this option, internet traffic goes through the proxy to reach the required AWS services. Option 3: Create a proxy with VPC endpoints As part of the installation process, you can configure an HTTP or HTTPS proxy with VPC endpoints. Create a VPC endpoint and attach it to the subnets that the clusters are using. Name the endpoints as follows: ec2.<aws_region>.amazonaws.com elasticloadbalancing.<aws_region>.amazonaws.com s3.<aws_region>.amazonaws.com When configuring the proxy in the install-config.yaml file, add these endpoints to the noProxy field. With this option, the proxy prevents the cluster from accessing the internet directly. However, network traffic remains private between your VPC and the required AWS services. Required VPC components You must provide a suitable VPC and subnets that allow communication to your machines. Component AWS type Description VPC AWS::EC2::VPC AWS::EC2::VPCEndpoint You must provide a public VPC for the cluster to use. The VPC uses an endpoint that references the route tables for each subnet to improve communication with the registry that is hosted in S3. Public subnets AWS::EC2::Subnet AWS::EC2::SubnetNetworkAclAssociation Your VPC must have public subnets for between 1 and 3 availability zones and associate them with appropriate Ingress rules. Internet gateway AWS::EC2::InternetGateway AWS::EC2::VPCGatewayAttachment AWS::EC2::RouteTable AWS::EC2::Route AWS::EC2::SubnetRouteTableAssociation AWS::EC2::NatGateway AWS::EC2::EIP You must have a public internet gateway, with public routes, attached to the VPC. In the provided templates, each public subnet has a NAT gateway with an EIP address. 
These NAT gateways allow cluster resources, like private subnet instances, to reach the internet and are not required for some restricted network or proxy scenarios. Network access control AWS::EC2::NetworkAcl AWS::EC2::NetworkAclEntry You must allow the VPC to access the following ports: Port Reason 80 Inbound HTTP traffic 443 Inbound HTTPS traffic 22 Inbound SSH traffic 1024 - 65535 Inbound ephemeral traffic 0 - 65535 Outbound ephemeral traffic Private subnets AWS::EC2::Subnet AWS::EC2::RouteTable AWS::EC2::SubnetRouteTableAssociation Your VPC can have private subnets. The provided CloudFormation templates can create private subnets for between 1 and 3 availability zones. If you use private subnets, you must provide appropriate routes and tables for them. 9.5.2. VPC validation To ensure that the subnets that you provide are suitable, the installation program confirms the following data: All the subnets that you specify exist. You provide private subnets. The subnet CIDRs belong to the machine CIDR that you specified. You provide subnets for each availability zone. Each availability zone contains no more than one public and one private subnet. If you use a private cluster, provide only a private subnet for each availability zone. Otherwise, provide exactly one public and private subnet for each availability zone. You provide a public subnet for each private subnet availability zone. Machines are not provisioned in availability zones that you do not provide private subnets for. If you destroy a cluster that uses an existing VPC, the VPC is not deleted. When you remove the OpenShift Container Platform cluster from a VPC, the kubernetes.io/cluster/.*: shared tag is removed from the subnets that it used. 9.5.3. Division of permissions Starting with OpenShift Container Platform 4.3, you do not need all of the permissions that are required for an installation program-provisioned infrastructure cluster to deploy a cluster. This change mimics the division of permissions that you might have at your company: some individuals can create different resource in your clouds than others. For example, you might be able to create application-specific items, like instances, buckets, and load balancers, but not networking-related components such as VPCs, subnets, or ingress rules. The AWS credentials that you use when you create your cluster do not need the networking permissions that are required to make VPCs and core networking components within the VPC, such as subnets, routing tables, internet gateways, NAT, and VPN. You still need permission to make the application resources that the machines within the cluster require, such as ELBs, security groups, S3 buckets, and nodes. 9.5.4. Isolation between clusters If you deploy OpenShift Container Platform to an existing network, the isolation of cluster services is reduced in the following ways: You can install multiple OpenShift Container Platform clusters in the same VPC. ICMP ingress is allowed from the entire network. TCP 22 ingress (SSH) is allowed to the entire network. Control plane TCP 6443 ingress (Kubernetes API) is allowed to the entire network. Control plane TCP 22623 ingress (MCS) is allowed to the entire network. 9.5.5. AWS security groups By default, the installation program creates and attaches security groups to control plane and compute machines. The rules associated with the default security groups cannot be modified. 
However, you can apply additional existing AWS security groups, which are associated with your existing VPC, to control plane and compute machines. Applying custom security groups can help you meet the security needs of your organization, in such cases where you need to control the incoming or outgoing traffic of these machines. As part of the installation process, you apply custom security groups by modifying the install-config.yaml file before deploying the cluster. For more information, see "Applying existing AWS security groups to the cluster". 9.6. Internet access for OpenShift Container Platform In OpenShift Container Platform 4.14, you require access to the internet to install your cluster. You must have internet access to: Access OpenShift Cluster Manager to download the installation program and perform subscription management. If the cluster has internet access and you do not disable Telemetry, that service automatically entitles your cluster. Access Quay.io to obtain the packages that are required to install your cluster. Obtain the packages that are required to perform cluster updates. Important If your cluster cannot have direct internet access, you can perform a restricted network installation on some types of infrastructure that you provision. During that process, you download the required content and use it to populate a mirror registry with the installation packages. With some installation types, the environment that you install your cluster in will not require internet access. Before you update the cluster, you update the content of the mirror registry. 9.7. Generating a key pair for cluster node SSH access During an OpenShift Container Platform installation, you can provide an SSH public key to the installation program. The key is passed to the Red Hat Enterprise Linux CoreOS (RHCOS) nodes through their Ignition config files and is used to authenticate SSH access to the nodes. The key is added to the ~/.ssh/authorized_keys list for the core user on each node, which enables password-less authentication. After the key is passed to the nodes, you can use the key pair to SSH in to the RHCOS nodes as the user core . To access the nodes through SSH, the private key identity must be managed by SSH for your local user. If you want to SSH in to your cluster nodes to perform installation debugging or disaster recovery, you must provide the SSH public key during the installation process. The ./openshift-install gather command also requires the SSH public key to be in place on the cluster nodes. Important Do not skip this procedure in production environments, where disaster recovery and debugging is required. Note You must use a local key, not one that you configured with platform-specific approaches such as AWS key pairs . Procedure If you do not have an existing SSH key pair on your local machine to use for authentication onto your cluster nodes, create one. For example, on a computer that uses a Linux operating system, run the following command: USD ssh-keygen -t ed25519 -N '' -f <path>/<file_name> 1 1 Specify the path and file name, such as ~/.ssh/id_ed25519 , of the new SSH key. If you have an existing key pair, ensure your public key is in the your ~/.ssh directory. Note If you plan to install an OpenShift Container Platform cluster that uses the RHEL cryptographic libraries that have been submitted to NIST for FIPS 140-2/140-3 Validation on only the x86_64 , ppc64le , and s390x architectures, do not create a key that uses the ed25519 algorithm. 
Instead, create a key that uses the rsa or ecdsa algorithm. View the public SSH key: USD cat <path>/<file_name>.pub For example, run the following to view the ~/.ssh/id_ed25519.pub public key: USD cat ~/.ssh/id_ed25519.pub Add the SSH private key identity to the SSH agent for your local user, if it has not already been added. SSH agent management of the key is required for password-less SSH authentication onto your cluster nodes, or if you want to use the ./openshift-install gather command. Note On some distributions, default SSH private key identities such as ~/.ssh/id_rsa and ~/.ssh/id_dsa are managed automatically. If the ssh-agent process is not already running for your local user, start it as a background task: USD eval "USD(ssh-agent -s)" Example output Agent pid 31874 Note If your cluster is in FIPS mode, only use FIPS-compliant algorithms to generate the SSH key. The key must be either RSA or ECDSA. Add your SSH private key to the ssh-agent : USD ssh-add <path>/<file_name> 1 1 Specify the path and file name for your SSH private key, such as ~/.ssh/id_ed25519 Example output Identity added: /home/<you>/<path>/<file_name> (<computer_name>) steps When you install OpenShift Container Platform, provide the SSH public key to the installation program. 9.8. Obtaining an AWS Marketplace image If you are deploying an OpenShift Container Platform cluster using an AWS Marketplace image, you must first subscribe through AWS. Subscribing to the offer provides you with the AMI ID that the installation program uses to deploy worker nodes. Prerequisites You have an AWS account to purchase the offer. This account does not have to be the same account that is used to install the cluster. Procedure Complete the OpenShift Container Platform subscription from the AWS Marketplace . Record the AMI ID for your specific region. As part of the installation process, you must update the install-config.yaml file with this value before deploying the cluster. Sample install-config.yaml file with AWS Marketplace worker nodes apiVersion: v1 baseDomain: example.com compute: - hyperthreading: Enabled name: worker platform: aws: amiID: ami-06c4d345f7c207239 1 type: m5.4xlarge replicas: 3 metadata: name: test-cluster platform: aws: region: us-east-2 2 sshKey: ssh-ed25519 AAAA... pullSecret: '{"auths": ...}' 1 The AMI ID from your AWS Marketplace subscription. 2 Your AMI ID is associated with a specific AWS region. When creating the installation configuration file, ensure that you select the same AWS region that you specified when configuring your subscription. 9.9. Obtaining the installation program Before you install OpenShift Container Platform, download the installation file on the host you are using for installation. Prerequisites You have a computer that runs Linux or macOS, with at least 1.2 GB of local disk space. Procedure Go to the Cluster Type page on the Red Hat Hybrid Cloud Console. If you have a Red Hat account, log in with your credentials. If you do not, create an account. Select your infrastructure provider from the Run it yourself section of the page. Select your host operating system and architecture from the dropdown menus under OpenShift Installer and click Download Installer . Place the downloaded file in the directory where you want to store the installation configuration files. Important The installation program creates several files on the computer that you use to install your cluster. 
You must keep the installation program and the files that the installation program creates after you finish installing the cluster. Both of the files are required to delete the cluster. Deleting the files created by the installation program does not remove your cluster, even if the cluster failed during installation. To remove your cluster, complete the OpenShift Container Platform uninstallation procedures for your specific cloud provider. Extract the installation program. For example, on a computer that uses a Linux operating system, run the following command: USD tar -xvf openshift-install-linux.tar.gz Download your installation pull secret from Red Hat OpenShift Cluster Manager . This pull secret allows you to authenticate with the services that are provided by the included authorities, including Quay.io, which serves the container images for OpenShift Container Platform components. Tip Alternatively, you can retrieve the installation program from the Red Hat Customer Portal , where you can specify a version of the installation program to download. However, you must have an active subscription to access this page. 9.10. Manually creating the installation configuration file Installing the cluster requires that you manually generate the installation configuration file. Prerequisites You have an SSH public key on your local machine to provide to the installation program. The key will be used for SSH authentication onto your cluster nodes for debugging and disaster recovery. You have obtained the OpenShift Container Platform installation program and the pull secret for your cluster. Procedure Create an installation directory to store your required installation assets in: USD mkdir <installation_directory> Important You must create a directory. Some installation assets, like bootstrap X.509 certificates have short expiration intervals, so you must not reuse an installation directory. If you want to reuse individual files from another cluster installation, you can copy them into your directory. However, the file names for the installation assets might change between releases. Use caution when copying installation files from an earlier OpenShift Container Platform version. Customize the sample install-config.yaml file template that is provided and save it in the <installation_directory> . Note You must name this configuration file install-config.yaml . Back up the install-config.yaml file so that you can use it to install multiple clusters. Important The install-config.yaml file is consumed during the step of the installation process. You must back it up now. Additional resources Installation configuration parameters for AWS 9.10.1. Minimum resource requirements for cluster installation Each cluster machine must meet the following minimum requirements: Table 9.1. Minimum resource requirements Machine Operating System vCPU [1] Virtual RAM Storage Input/Output Per Second (IOPS) [2] Bootstrap RHCOS 4 16 GB 100 GB 300 Control plane RHCOS 4 16 GB 100 GB 300 Compute RHCOS, RHEL 8.6 and later [3] 2 8 GB 100 GB 300 One vCPU is equivalent to one physical core when simultaneous multithreading (SMT), or Hyper-Threading, is not enabled. When enabled, use the following formula to calculate the corresponding ratio: (threads per core x cores) x sockets = vCPUs. OpenShift Container Platform and Kubernetes are sensitive to disk performance, and faster storage is recommended, particularly for etcd on the control plane nodes which require a 10 ms p99 fsync duration. 
Note that on many cloud platforms, storage size and IOPS scale together, so you might need to over-allocate storage volume to obtain sufficient performance. As with all user-provisioned installations, if you choose to use RHEL compute machines in your cluster, you take responsibility for all operating system life cycle management and maintenance, including performing system updates, applying patches, and completing all other required tasks. Use of RHEL 7 compute machines is deprecated and has been removed in OpenShift Container Platform 4.10 and later. Note As of OpenShift Container Platform version 4.13, RHCOS is based on RHEL version 9.2, which updates the micro-architecture requirements. The following list contains the minimum instruction set architectures (ISA) that each architecture requires: x86-64 architecture requires x86-64-v2 ISA ARM64 architecture requires ARMv8.0-A ISA IBM Power architecture requires Power 9 ISA s390x architecture requires z14 ISA For more information, see RHEL Architectures . If an instance type for your platform meets the minimum requirements for cluster machines, it is supported to use in OpenShift Container Platform. Additional resources Optimizing storage 9.10.2. Tested instance types for AWS The following Amazon Web Services (AWS) instance types have been tested with OpenShift Container Platform. Note Use the machine types included in the following charts for your AWS instances. If you use an instance type that is not listed in the chart, ensure that the instance size you use matches the minimum resource requirements that are listed in "Minimum resource requirements for cluster installation". Example 9.1. Machine types based on 64-bit x86 architecture c4.* c5.* c5a.* i3.* m4.* m5.* m5a.* m6i.* r4.* r5.* r5a.* r6i.* t3.* t3a.* 9.10.3. Tested instance types for AWS on 64-bit ARM infrastructures The following Amazon Web Services (AWS) 64-bit ARM instance types have been tested with OpenShift Container Platform. Note Use the machine types included in the following charts for your AWS ARM instances. If you use an instance type that is not listed in the chart, ensure that the instance size you use matches the minimum resource requirements that are listed in "Minimum resource requirements for cluster installation". Example 9.2. Machine types based on 64-bit ARM architecture c6g.* m6g.* r8g.* 9.10.4. Sample customized install-config.yaml file for AWS You can customize the installation configuration file ( install-config.yaml ) to specify more details about your OpenShift Container Platform cluster's platform or modify the values of the required parameters. Important This sample YAML file is provided for reference only. Use it as a resource to enter parameter values into the installation configuration file that you created manually. 
apiVersion: v1 baseDomain: example.com 1 credentialsMode: Mint 2 controlPlane: 3 4 hyperthreading: Enabled 5 name: master platform: aws: zones: - us-gov-west-1a - us-gov-west-1b rootVolume: iops: 4000 size: 500 type: io1 6 metadataService: authentication: Optional 7 type: m6i.xlarge replicas: 3 compute: 8 - hyperthreading: Enabled 9 name: worker platform: aws: rootVolume: iops: 2000 size: 500 type: io1 10 metadataService: authentication: Optional 11 type: c5.4xlarge zones: - us-gov-west-1c replicas: 3 metadata: name: test-cluster 12 networking: clusterNetwork: - cidr: 10.128.0.0/14 hostPrefix: 23 machineNetwork: - cidr: 10.0.0.0/16 networkType: OVNKubernetes 13 serviceNetwork: - 172.30.0.0/16 platform: aws: region: us-gov-west-1 14 propagateUserTags: true 15 userTags: adminContact: jdoe costCenter: 7536 subnets: 16 - subnet-1 - subnet-2 - subnet-3 amiID: ami-0c5d3e03c0ab9b19a 17 serviceEndpoints: 18 - name: ec2 url: https://vpce-id.ec2.us-west-2.vpce.amazonaws.com hostedZone: Z3URY6TWQ91KVV 19 fips: false 20 sshKey: ssh-ed25519 AAAA... 21 publish: Internal 22 pullSecret: '{"auths": ...}' 23 1 12 14 23 Required. 2 Optional: Add this parameter to force the Cloud Credential Operator (CCO) to use the specified mode. By default, the CCO uses the root credentials in the kube-system namespace to dynamically try to determine the capabilities of the credentials. For details about CCO modes, see the "About the Cloud Credential Operator" section in the Authentication and authorization guide. 3 8 15 If you do not provide these parameters and values, the installation program provides the default value. 4 The controlPlane section is a single mapping, but the compute section is a sequence of mappings. To meet the requirements of the different data structures, the first line of the compute section must begin with a hyphen, - , and the first line of the controlPlane section must not. Only one control plane pool is used. 5 9 Whether to enable or disable simultaneous multithreading, or hyperthreading . By default, simultaneous multithreading is enabled to increase the performance of your machines' cores. You can disable it by setting the parameter value to Disabled . If you disable simultaneous multithreading in some cluster machines, you must disable it in all cluster machines. Important If you disable simultaneous multithreading, ensure that your capacity planning accounts for the dramatically decreased machine performance. Use larger instance types, such as m4.2xlarge or m5.2xlarge , for your machines if you disable simultaneous multithreading. 6 10 To configure faster storage for etcd, especially for larger clusters, set the storage type as io1 and set iops to 2000 . 7 11 Whether to require the Amazon EC2 Instance Metadata Service v2 (IMDSv2). To require IMDSv2, set the parameter value to Required . To allow the use of both IMDSv1 and IMDSv2, set the parameter value to Optional . If no value is specified, both IMDSv1 and IMDSv2 are allowed. Note The IMDS configuration for control plane machines that is set during cluster installation can only be changed by using the AWS CLI. The IMDS configuration for compute machines can be changed by using compute machine sets. 13 The cluster network plugin to install. The supported values are OVNKubernetes and OpenShiftSDN . The default value is OVNKubernetes . 16 If you provide your own VPC, specify subnets for each availability zone that your cluster uses. 17 The ID of the AMI used to boot machines for the cluster. 
If set, the AMI must belong to the same region as the cluster. 18 The AWS service endpoints. Custom endpoints are required when installing to an unknown AWS region. The endpoint URL must use the https protocol and the host must trust the certificate. 19 The ID of your existing Route 53 private hosted zone. Providing an existing hosted zone requires that you supply your own VPC and the hosted zone is already associated with the VPC prior to installing your cluster. If undefined, the installation program creates a new hosted zone. 20 Whether to enable or disable FIPS mode. By default, FIPS mode is not enabled. If FIPS mode is enabled, the Red Hat Enterprise Linux CoreOS (RHCOS) machines that OpenShift Container Platform runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with RHCOS instead. Important To enable FIPS mode for your cluster, you must run the installation program from a Red Hat Enterprise Linux (RHEL) computer configured to operate in FIPS mode. For more information about configuring FIPS mode on RHEL, see Installing the system in FIPS mode . When running Red Hat Enterprise Linux (RHEL) or Red Hat Enterprise Linux CoreOS (RHCOS) booted in FIPS mode, OpenShift Container Platform core components use the RHEL cryptographic libraries that have been submitted to NIST for FIPS 140-2/140-3 Validation on only the x86_64, ppc64le, and s390x architectures. 21 You can optionally provide the sshKey value that you use to access the machines in your cluster. Note For production OpenShift Container Platform clusters on which you want to perform installation debugging or disaster recovery, specify an SSH key that your ssh-agent process uses. 22 How to publish the user-facing endpoints of your cluster. Set publish to Internal to deploy a private cluster, which cannot be accessed from the internet. The default value is External . 9.10.5. Configuring the cluster-wide proxy during installation Production environments can deny direct access to the internet and instead have an HTTP or HTTPS proxy available. You can configure a new OpenShift Container Platform cluster to use a proxy by configuring the proxy settings in the install-config.yaml file. Prerequisites You have an existing install-config.yaml file. You reviewed the sites that your cluster requires access to and determined whether any of them need to bypass the proxy. By default, all cluster egress traffic is proxied, including calls to hosting cloud provider APIs. You added sites to the Proxy object's spec.noProxy field to bypass the proxy if necessary. Note The Proxy object status.noProxy field is populated with the values of the networking.machineNetwork[].cidr , networking.clusterNetwork[].cidr , and networking.serviceNetwork[] fields from your installation configuration. For installations on Amazon Web Services (AWS), Google Cloud Platform (GCP), Microsoft Azure, and Red Hat OpenStack Platform (RHOSP), the Proxy object status.noProxy field is also populated with the instance metadata endpoint ( 169.254.169.254 ). Procedure Edit your install-config.yaml file and add the proxy settings. 
For example: apiVersion: v1 baseDomain: my.domain.com proxy: httpProxy: http://<username>:<pswd>@<ip>:<port> 1 httpsProxy: https://<username>:<pswd>@<ip>:<port> 2 noProxy: ec2.<aws_region>.amazonaws.com,elasticloadbalancing.<aws_region>.amazonaws.com,s3.<aws_region>.amazonaws.com 3 additionalTrustBundle: | 4 -----BEGIN CERTIFICATE----- <MY_TRUSTED_CA_CERT> -----END CERTIFICATE----- additionalTrustBundlePolicy: <policy_to_add_additionalTrustBundle> 5 1 A proxy URL to use for creating HTTP connections outside the cluster. The URL scheme must be http . 2 A proxy URL to use for creating HTTPS connections outside the cluster. 3 A comma-separated list of destination domain names, IP addresses, or other network CIDRs to exclude from proxying. Preface a domain with . to match subdomains only. For example, .y.com matches x.y.com , but not y.com . Use * to bypass the proxy for all destinations. If you have added the Amazon EC2 , Elastic Load Balancing , and S3 VPC endpoints to your VPC, you must add these endpoints to the noProxy field. 4 If provided, the installation program generates a config map that is named user-ca-bundle in the openshift-config namespace that contains one or more additional CA certificates that are required for proxying HTTPS connections. The Cluster Network Operator then creates a trusted-ca-bundle config map that merges these contents with the Red Hat Enterprise Linux CoreOS (RHCOS) trust bundle, and this config map is referenced in the trustedCA field of the Proxy object. The additionalTrustBundle field is required unless the proxy's identity certificate is signed by an authority from the RHCOS trust bundle. 5 Optional: The policy to determine the configuration of the Proxy object to reference the user-ca-bundle config map in the trustedCA field. The allowed values are Proxyonly and Always . Use Proxyonly to reference the user-ca-bundle config map only when http/https proxy is configured. Use Always to always reference the user-ca-bundle config map. The default value is Proxyonly . Note The installation program does not support the proxy readinessEndpoints field. Note If the installer times out, restart and then complete the deployment by using the wait-for command of the installer. For example: USD ./openshift-install wait-for install-complete --log-level debug Save the file and reference it when installing OpenShift Container Platform. The installation program creates a cluster-wide proxy that is named cluster that uses the proxy settings in the provided install-config.yaml file. If no proxy settings are provided, a cluster Proxy object is still created, but it will have a nil spec . Note Only the Proxy object named cluster is supported, and no additional proxies can be created. 9.10.6. Applying existing AWS security groups to the cluster Applying existing AWS security groups to your control plane and compute machines can help you meet the security needs of your organization, in such cases where you need to control the incoming or outgoing traffic of these machines. Prerequisites You have created the security groups in AWS. For more information, see the AWS documentation about working with security groups . The security groups must be associated with the existing VPC that you are deploying the cluster to. The security groups cannot be associated with another VPC. You have an existing install-config.yaml file. 
Procedure In the install-config.yaml file, edit the compute.platform.aws.additionalSecurityGroupIDs parameter to specify one or more custom security groups for your compute machines. Edit the controlPlane.platform.aws.additionalSecurityGroupIDs parameter to specify one or more custom security groups for your control plane machines. Save the file and reference it when deploying the cluster. Sample install-config.yaml file that specifies custom security groups # ... compute: - hyperthreading: Enabled name: worker platform: aws: additionalSecurityGroupIDs: - sg-1 1 - sg-2 replicas: 3 controlPlane: hyperthreading: Enabled name: master platform: aws: additionalSecurityGroupIDs: - sg-3 - sg-4 replicas: 3 platform: aws: region: us-east-1 subnets: 2 - subnet-1 - subnet-2 - subnet-3 1 Specify the name of the security group as it appears in the Amazon EC2 console, including the sg prefix. 2 Specify subnets for each availability zone that your cluster uses. 9.11. Installing the OpenShift CLI by downloading the binary You can install the OpenShift CLI ( oc ) to interact with OpenShift Container Platform from a command-line interface. You can install oc on Linux, Windows, or macOS. Important If you installed an earlier version of oc , you cannot use it to complete all of the commands in OpenShift Container Platform 4.14. Download and install the new version of oc . Installing the OpenShift CLI on Linux You can install the OpenShift CLI ( oc ) binary on Linux by using the following procedure. Procedure Navigate to the OpenShift Container Platform downloads page on the Red Hat Customer Portal. Select the architecture from the Product Variant drop-down list. Select the appropriate version from the Version drop-down list. Click Download Now to the OpenShift v4.14 Linux Client entry and save the file. Unpack the archive: USD tar xvf <file> Place the oc binary in a directory that is on your PATH . To check your PATH , execute the following command: USD echo USDPATH Verification After you install the OpenShift CLI, it is available using the oc command: USD oc <command> Installing the OpenShift CLI on Windows You can install the OpenShift CLI ( oc ) binary on Windows by using the following procedure. Procedure Navigate to the OpenShift Container Platform downloads page on the Red Hat Customer Portal. Select the appropriate version from the Version drop-down list. Click Download Now to the OpenShift v4.14 Windows Client entry and save the file. Unzip the archive with a ZIP program. Move the oc binary to a directory that is on your PATH . To check your PATH , open the command prompt and execute the following command: C:\> path Verification After you install the OpenShift CLI, it is available using the oc command: C:\> oc <command> Installing the OpenShift CLI on macOS You can install the OpenShift CLI ( oc ) binary on macOS by using the following procedure. Procedure Navigate to the OpenShift Container Platform downloads page on the Red Hat Customer Portal. Select the appropriate version from the Version drop-down list. Click Download Now to the OpenShift v4.14 macOS Client entry and save the file. Note For macOS arm64, choose the OpenShift v4.14 macOS arm64 Client entry. Unpack and unzip the archive. Move the oc binary to a directory on your PATH. To check your PATH , open a terminal and execute the following command: USD echo USDPATH Verification Verify your installation by using an oc command: USD oc <command> 9.12. 
Alternatives to storing administrator-level secrets in the kube-system project By default, administrator secrets are stored in the kube-system project. If you configured the credentialsMode parameter in the install-config.yaml file to Manual , you must use one of the following alternatives: To manage long-term cloud credentials manually, follow the procedure in Manually creating long-term credentials . To implement short-term credentials that are managed outside the cluster for individual components, follow the procedures in Incorporating the Cloud Credential Operator utility manifests . 9.12.1. Manually creating long-term credentials The Cloud Credential Operator (CCO) can be put into manual mode prior to installation in environments where the cloud identity and access management (IAM) APIs are not reachable, or the administrator prefers not to store an administrator-level credential secret in the cluster kube-system namespace. Procedure If you did not set the credentialsMode parameter in the install-config.yaml configuration file to Manual , modify the value as shown: Sample configuration file snippet apiVersion: v1 baseDomain: example.com credentialsMode: Manual # ... If you have not previously created installation manifest files, do so by running the following command: USD openshift-install create manifests --dir <installation_directory> where <installation_directory> is the directory in which the installation program creates files. Set a USDRELEASE_IMAGE variable with the release image from your installation file by running the following command: USD RELEASE_IMAGE=USD(./openshift-install version | awk '/release image/ {print USD3}') Extract the list of CredentialsRequest custom resources (CRs) from the OpenShift Container Platform release image by running the following command: USD oc adm release extract \ --from=USDRELEASE_IMAGE \ --credentials-requests \ --included \ 1 --install-config=<path_to_directory_with_installation_configuration>/install-config.yaml \ 2 --to=<path_to_directory_for_credentials_requests> 3 1 The --included parameter includes only the manifests that your specific cluster configuration requires. 2 Specify the location of the install-config.yaml file. 3 Specify the path to the directory where you want to store the CredentialsRequest objects. If the specified directory does not exist, this command creates it. This command creates a YAML file for each CredentialsRequest object. Sample CredentialsRequest object apiVersion: cloudcredential.openshift.io/v1 kind: CredentialsRequest metadata: name: <component_credentials_request> namespace: openshift-cloud-credential-operator ... spec: providerSpec: apiVersion: cloudcredential.openshift.io/v1 kind: AWSProviderSpec statementEntries: - effect: Allow action: - iam:GetUser - iam:GetUserPolicy - iam:ListAccessKeys resource: "*" ... Create YAML files for secrets in the openshift-install manifests directory that you generated previously. The secrets must be stored using the namespace and secret name defined in the spec.secretRef for each CredentialsRequest object. Sample CredentialsRequest object with secrets apiVersion: cloudcredential.openshift.io/v1 kind: CredentialsRequest metadata: name: <component_credentials_request> namespace: openshift-cloud-credential-operator ... spec: providerSpec: apiVersion: cloudcredential.openshift.io/v1 kind: AWSProviderSpec statementEntries: - effect: Allow action: - s3:CreateBucket - s3:DeleteBucket resource: "*" ... secretRef: name: <component_secret> namespace: <component_namespace> ... 
Sample Secret object apiVersion: v1 kind: Secret metadata: name: <component_secret> namespace: <component_namespace> data: aws_access_key_id: <base64_encoded_aws_access_key_id> aws_secret_access_key: <base64_encoded_aws_secret_access_key> Important Before upgrading a cluster that uses manually maintained credentials, you must ensure that the CCO is in an upgradeable state. 9.12.2. Configuring an AWS cluster to use short-term credentials To install a cluster that is configured to use the AWS Security Token Service (STS), you must configure the CCO utility and create the required AWS resources for your cluster. 9.12.2.1. Configuring the Cloud Credential Operator utility To create and manage cloud credentials from outside of the cluster when the Cloud Credential Operator (CCO) is operating in manual mode, extract and prepare the CCO utility ( ccoctl ) binary. Note The ccoctl utility is a Linux binary that must run in a Linux environment. Prerequisites You have access to an OpenShift Container Platform account with cluster administrator access. You have installed the OpenShift CLI ( oc ). You have created an AWS account for the ccoctl utility to use with the following permissions: Example 9.3. Required AWS permissions Required iam permissions iam:CreateOpenIDConnectProvider iam:CreateRole iam:DeleteOpenIDConnectProvider iam:DeleteRole iam:DeleteRolePolicy iam:GetOpenIDConnectProvider iam:GetRole iam:GetUser iam:ListOpenIDConnectProviders iam:ListRolePolicies iam:ListRoles iam:PutRolePolicy iam:TagOpenIDConnectProvider iam:TagRole Required s3 permissions s3:CreateBucket s3:DeleteBucket s3:DeleteObject s3:GetBucketAcl s3:GetBucketTagging s3:GetObject s3:GetObjectAcl s3:GetObjectTagging s3:ListBucket s3:PutBucketAcl s3:PutBucketPolicy s3:PutBucketPublicAccessBlock s3:PutBucketTagging s3:PutObject s3:PutObjectAcl s3:PutObjectTagging Required cloudfront permissions cloudfront:ListCloudFrontOriginAccessIdentities cloudfront:ListDistributions cloudfront:ListTagsForResource If you plan to store the OIDC configuration in a private S3 bucket that is accessed by the IAM identity provider through a public CloudFront distribution URL, the AWS account that runs the ccoctl utility requires the following additional permissions: Example 9.4. Additional permissions for a private S3 bucket with CloudFront cloudfront:CreateCloudFrontOriginAccessIdentity cloudfront:CreateDistribution cloudfront:DeleteCloudFrontOriginAccessIdentity cloudfront:DeleteDistribution cloudfront:GetCloudFrontOriginAccessIdentity cloudfront:GetCloudFrontOriginAccessIdentityConfig cloudfront:GetDistribution cloudfront:TagResource cloudfront:UpdateDistribution Note These additional permissions support the use of the --create-private-s3-bucket option when processing credentials requests with the ccoctl aws create-all command. Procedure Set a variable for the OpenShift Container Platform release image by running the following command: USD RELEASE_IMAGE=USD(./openshift-install version | awk '/release image/ {print USD3}') Obtain the CCO container image from the OpenShift Container Platform release image by running the following command: USD CCO_IMAGE=USD(oc adm release info --image-for='cloud-credential-operator' USDRELEASE_IMAGE -a ~/.pull-secret) Note Ensure that the architecture of the USDRELEASE_IMAGE matches the architecture of the environment in which you will use the ccoctl tool. 
Extract the ccoctl binary from the CCO container image within the OpenShift Container Platform release image by running the following command: USD oc image extract USDCCO_IMAGE --file="/usr/bin/ccoctl" -a ~/.pull-secret Change the permissions to make ccoctl executable by running the following command: USD chmod 775 ccoctl Verification To verify that ccoctl is ready to use, display the help file. Use a relative file name when you run the command, for example: USD ./ccoctl.rhel9 Example output OpenShift credentials provisioning tool Usage: ccoctl [command] Available Commands: alibabacloud Manage credentials objects for alibaba cloud aws Manage credentials objects for AWS cloud azure Manage credentials objects for Azure gcp Manage credentials objects for Google cloud help Help about any command ibmcloud Manage credentials objects for IBM Cloud nutanix Manage credentials objects for Nutanix Flags: -h, --help help for ccoctl Use "ccoctl [command] --help" for more information about a command. 9.12.2.2. Creating AWS resources with the Cloud Credential Operator utility You have the following options when creating AWS resources: You can use the ccoctl aws create-all command to create the AWS resources automatically. This is the quickest way to create the resources. See Creating AWS resources with a single command . If you need to review the JSON files that the ccoctl tool creates before modifying AWS resources, or if the process the ccoctl tool uses to create AWS resources automatically does not meet the requirements of your organization, you can create the AWS resources individually. See Creating AWS resources individually . 9.12.2.2.1. Creating AWS resources with a single command If the process the ccoctl tool uses to create AWS resources automatically meets the requirements of your organization, you can use the ccoctl aws create-all command to automate the creation of AWS resources. Otherwise, you can create the AWS resources individually. For more information, see "Creating AWS resources individually". Note By default, ccoctl creates objects in the directory in which the commands are run. To create the objects in a different directory, use the --output-dir flag. This procedure uses <path_to_ccoctl_output_dir> to refer to this directory. Prerequisites You must have: Extracted and prepared the ccoctl binary. Procedure Set a USDRELEASE_IMAGE variable with the release image from your installation file by running the following command: USD RELEASE_IMAGE=USD(./openshift-install version | awk '/release image/ {print USD3}') Extract the list of CredentialsRequest objects from the OpenShift Container Platform release image by running the following command: USD oc adm release extract \ --from=USDRELEASE_IMAGE \ --credentials-requests \ --included \ 1 --install-config=<path_to_directory_with_installation_configuration>/install-config.yaml \ 2 --to=<path_to_directory_for_credentials_requests> 3 1 The --included parameter includes only the manifests that your specific cluster configuration requires. 2 Specify the location of the install-config.yaml file. 3 Specify the path to the directory where you want to store the CredentialsRequest objects. If the specified directory does not exist, this command creates it. Note This command might take a few moments to run. 
Use the ccoctl tool to process all CredentialsRequest objects by running the following command: USD ccoctl aws create-all \ --name=<name> \ 1 --region=<aws_region> \ 2 --credentials-requests-dir=<path_to_credentials_requests_directory> \ 3 --output-dir=<path_to_ccoctl_output_dir> \ 4 --create-private-s3-bucket 5 1 Specify the name used to tag any cloud resources that are created for tracking. 2 Specify the AWS region in which cloud resources will be created. 3 Specify the directory containing the files for the component CredentialsRequest objects. 4 Optional: Specify the directory in which you want the ccoctl utility to create objects. By default, the utility creates objects in the directory in which the commands are run. 5 Optional: By default, the ccoctl utility stores the OpenID Connect (OIDC) configuration files in a public S3 bucket and uses the S3 URL as the public OIDC endpoint. To store the OIDC configuration in a private S3 bucket that is accessed by the IAM identity provider through a public CloudFront distribution URL instead, use the --create-private-s3-bucket parameter. Note If your cluster uses Technology Preview features that are enabled by the TechPreviewNoUpgrade feature set, you must include the --enable-tech-preview parameter. Verification To verify that the OpenShift Container Platform secrets are created, list the files in the <path_to_ccoctl_output_dir>/manifests directory: USD ls <path_to_ccoctl_output_dir>/manifests Example output cluster-authentication-02-config.yaml openshift-cloud-credential-operator-cloud-credential-operator-iam-ro-creds-credentials.yaml openshift-cloud-network-config-controller-cloud-credentials-credentials.yaml openshift-cluster-api-capa-manager-bootstrap-credentials-credentials.yaml openshift-cluster-csi-drivers-ebs-cloud-credentials-credentials.yaml openshift-image-registry-installer-cloud-credentials-credentials.yaml openshift-ingress-operator-cloud-credentials-credentials.yaml openshift-machine-api-aws-cloud-credentials-credentials.yaml You can verify that the IAM roles are created by querying AWS. For more information, refer to AWS documentation on listing IAM roles. 9.12.2.2.2. Creating AWS resources individually You can use the ccoctl tool to create AWS resources individually. This option might be useful for an organization that shares the responsibility for creating these resources among different users or departments. Otherwise, you can use the ccoctl aws create-all command to create the AWS resources automatically. For more information, see "Creating AWS resources with a single command". Note By default, ccoctl creates objects in the directory in which the commands are run. To create the objects in a different directory, use the --output-dir flag. This procedure uses <path_to_ccoctl_output_dir> to refer to this directory. Some ccoctl commands make AWS API calls to create or modify AWS resources. You can use the --dry-run flag to avoid making API calls. Using this flag creates JSON files on the local file system instead. You can review and modify the JSON files and then apply them with the AWS CLI tool using the --cli-input-json parameters. Prerequisites Extract and prepare the ccoctl binary. 
Procedure Generate the public and private RSA key files that are used to set up the OpenID Connect provider for the cluster by running the following command: USD ccoctl aws create-key-pair Example output 2021/04/13 11:01:02 Generating RSA keypair 2021/04/13 11:01:03 Writing private key to /<path_to_ccoctl_output_dir>/serviceaccount-signer.private 2021/04/13 11:01:03 Writing public key to /<path_to_ccoctl_output_dir>/serviceaccount-signer.public 2021/04/13 11:01:03 Copying signing key for use by installer where serviceaccount-signer.private and serviceaccount-signer.public are the generated key files. This command also creates a private key that the cluster requires during installation in /<path_to_ccoctl_output_dir>/tls/bound-service-account-signing-key.key . Create an OpenID Connect identity provider and S3 bucket on AWS by running the following command: USD ccoctl aws create-identity-provider \ --name=<name> \ 1 --region=<aws_region> \ 2 --public-key-file=<path_to_ccoctl_output_dir>/serviceaccount-signer.public 3 1 <name> is the name used to tag any cloud resources that are created for tracking. 2 <aws-region> is the AWS region in which cloud resources will be created. 3 <path_to_ccoctl_output_dir> is the path to the public key file that the ccoctl aws create-key-pair command generated. Example output 2021/04/13 11:16:09 Bucket <name>-oidc created 2021/04/13 11:16:10 OpenID Connect discovery document in the S3 bucket <name>-oidc at .well-known/openid-configuration updated 2021/04/13 11:16:10 Reading public key 2021/04/13 11:16:10 JSON web key set (JWKS) in the S3 bucket <name>-oidc at keys.json updated 2021/04/13 11:16:18 Identity Provider created with ARN: arn:aws:iam::<aws_account_id>:oidc-provider/<name>-oidc.s3.<aws_region>.amazonaws.com where openid-configuration is a discovery document and keys.json is a JSON web key set file. This command also creates a YAML configuration file in /<path_to_ccoctl_output_dir>/manifests/cluster-authentication-02-config.yaml . This file sets the issuer URL field for the service account tokens that the cluster generates, so that the AWS IAM identity provider trusts the tokens. Create IAM roles for each component in the cluster: Set a USDRELEASE_IMAGE variable with the release image from your installation file by running the following command: USD RELEASE_IMAGE=USD(./openshift-install version | awk '/release image/ {print USD3}') Extract the list of CredentialsRequest objects from the OpenShift Container Platform release image: USD oc adm release extract \ --from=USDRELEASE_IMAGE \ --credentials-requests \ --included \ 1 --install-config=<path_to_directory_with_installation_configuration>/install-config.yaml \ 2 --to=<path_to_directory_for_credentials_requests> 3 1 The --included parameter includes only the manifests that your specific cluster configuration requires. 2 Specify the location of the install-config.yaml file. 3 Specify the path to the directory where you want to store the CredentialsRequest objects. If the specified directory does not exist, this command creates it. 
Use the ccoctl tool to process all CredentialsRequest objects by running the following command: USD ccoctl aws create-iam-roles \ --name=<name> \ --region=<aws_region> \ --credentials-requests-dir=<path_to_credentials_requests_directory> \ --identity-provider-arn=arn:aws:iam::<aws_account_id>:oidc-provider/<name>-oidc.s3.<aws_region>.amazonaws.com Note For AWS environments that use alternative IAM API endpoints, such as GovCloud, you must also specify your region with the --region parameter. If your cluster uses Technology Preview features that are enabled by the TechPreviewNoUpgrade feature set, you must include the --enable-tech-preview parameter. For each CredentialsRequest object, ccoctl creates an IAM role with a trust policy that is tied to the specified OIDC identity provider, and a permissions policy as defined in each CredentialsRequest object from the OpenShift Container Platform release image. Verification To verify that the OpenShift Container Platform secrets are created, list the files in the <path_to_ccoctl_output_dir>/manifests directory: USD ls <path_to_ccoctl_output_dir>/manifests Example output cluster-authentication-02-config.yaml openshift-cloud-credential-operator-cloud-credential-operator-iam-ro-creds-credentials.yaml openshift-cloud-network-config-controller-cloud-credentials-credentials.yaml openshift-cluster-api-capa-manager-bootstrap-credentials-credentials.yaml openshift-cluster-csi-drivers-ebs-cloud-credentials-credentials.yaml openshift-image-registry-installer-cloud-credentials-credentials.yaml openshift-ingress-operator-cloud-credentials-credentials.yaml openshift-machine-api-aws-cloud-credentials-credentials.yaml You can verify that the IAM roles are created by querying AWS. For more information, refer to AWS documentation on listing IAM roles. 9.12.2.3. Incorporating the Cloud Credential Operator utility manifests To implement short-term security credentials managed outside the cluster for individual components, you must move the manifest files that the Cloud Credential Operator utility ( ccoctl ) created to the correct directories for the installation program. Prerequisites You have configured an account with the cloud platform that hosts your cluster. You have configured the Cloud Credential Operator utility ( ccoctl ). You have created the cloud provider resources that are required for your cluster with the ccoctl utility. Procedure If you did not set the credentialsMode parameter in the install-config.yaml configuration file to Manual , modify the value as shown: Sample configuration file snippet apiVersion: v1 baseDomain: example.com credentialsMode: Manual # ... If you have not previously created installation manifest files, do so by running the following command: USD openshift-install create manifests --dir <installation_directory> where <installation_directory> is the directory in which the installation program creates files. Copy the manifests that the ccoctl utility generated to the manifests directory that the installation program created by running the following command: USD cp /<path_to_ccoctl_output_dir>/manifests/* ./manifests/ Copy the tls directory that contains the private key to the installation directory: USD cp -a /<path_to_ccoctl_output_dir>/tls . 9.13. Deploying the cluster You can install OpenShift Container Platform on a compatible cloud platform. Important You can run the create cluster command of the installation program only once, during initial installation. 
Prerequisites You have configured an account with the cloud platform that hosts your cluster. You have the OpenShift Container Platform installation program and the pull secret for your cluster. You have verified that the cloud provider account on your host has the correct permissions to deploy the cluster. An account with incorrect permissions causes the installation process to fail with an error message that displays the missing permissions. Procedure Change to the directory that contains the installation program and initialize the cluster deployment: USD ./openshift-install create cluster --dir <installation_directory> \ 1 --log-level=info 2 1 For <installation_directory> , specify the location of your customized ./install-config.yaml file. 2 To view different installation details, specify warn , debug , or error instead of info . Optional: Remove or disable the AdministratorAccess policy from the IAM account that you used to install the cluster. Note The elevated permissions provided by the AdministratorAccess policy are required only during installation. Verification When the cluster deployment completes successfully: The terminal displays directions for accessing your cluster, including a link to the web console and credentials for the kubeadmin user. Credential information also outputs to <installation_directory>/.openshift_install.log . Important Do not delete the installation program or the files that the installation program creates. Both are required to delete the cluster. Example output ... INFO Install complete! INFO To access the cluster as the system:admin user when using 'oc', run 'export KUBECONFIG=/home/myuser/install_dir/auth/kubeconfig' INFO Access the OpenShift web-console here: https://console-openshift-console.apps.mycluster.example.com INFO Login to the console with user: "kubeadmin", and password: "password" INFO Time elapsed: 36m22s Important The Ignition config files that the installation program generates contain certificates that expire after 24 hours, which are then renewed at that time. If the cluster is shut down before renewing the certificates and the cluster is later restarted after the 24 hours have elapsed, the cluster automatically recovers the expired certificates. The exception is that you must manually approve the pending node-bootstrapper certificate signing requests (CSRs) to recover kubelet certificates. See the documentation for Recovering from expired control plane certificates for more information. It is recommended that you use Ignition config files within 12 hours after they are generated because the 24-hour certificate rotates from 16 to 22 hours after the cluster is installed. By using the Ignition config files within 12 hours, you can avoid installation failure if the certificate update runs during installation. 9.14. Logging in to the cluster by using the CLI You can log in to your cluster as a default system user by exporting the cluster kubeconfig file. The kubeconfig file contains information about the cluster that is used by the CLI to connect a client to the correct cluster and API server. The file is specific to a cluster and is created during OpenShift Container Platform installation. Prerequisites You deployed an OpenShift Container Platform cluster. You installed the oc CLI. Procedure Export the kubeadmin credentials: USD export KUBECONFIG=<installation_directory>/auth/kubeconfig 1 1 For <installation_directory> , specify the path to the directory that you stored the installation files in. 
Verify you can run oc commands successfully using the exported configuration: USD oc whoami Example output system:admin 9.15. Logging in to the cluster by using the web console The kubeadmin user exists by default after an OpenShift Container Platform installation. You can log in to your cluster as the kubeadmin user by using the OpenShift Container Platform web console. Prerequisites You have access to the installation host. You completed a cluster installation and all cluster Operators are available. Procedure Obtain the password for the kubeadmin user from the kubeadmin-password file on the installation host: USD cat <installation_directory>/auth/kubeadmin-password Note Alternatively, you can obtain the kubeadmin password from the <installation_directory>/.openshift_install.log log file on the installation host. List the OpenShift Container Platform web console route: USD oc get routes -n openshift-console | grep 'console-openshift' Note Alternatively, you can obtain the OpenShift Container Platform route from the <installation_directory>/.openshift_install.log log file on the installation host. Example output console console-openshift-console.apps.<cluster_name>.<base_domain> console https reencrypt/Redirect None Navigate to the route detailed in the output of the preceding command in a web browser and log in as the kubeadmin user. Additional resources See Accessing the web console for more details about accessing and understanding the OpenShift Container Platform web console. 9.16. Telemetry access for OpenShift Container Platform In OpenShift Container Platform 4.14, the Telemetry service, which runs by default to provide metrics about cluster health and the success of updates, requires internet access. If your cluster is connected to the internet, Telemetry runs automatically, and your cluster is registered to OpenShift Cluster Manager . After you confirm that your OpenShift Cluster Manager inventory is correct, either maintained automatically by Telemetry or manually by using OpenShift Cluster Manager, use subscription watch to track your OpenShift Container Platform subscriptions at the account or multi-cluster level. Additional resources See About remote health monitoring for more information about the Telemetry service. 9.17. Next steps Validating an installation . Customize your cluster . If necessary, you can opt out of remote health reporting . If necessary, you can remove cloud provider credentials .
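As a quick companion to the validation steps referenced above, the following is a minimal post-installation sanity check sketch. It assumes the KUBECONFIG exported in the previous section and uses only standard oc subcommands; the exact output depends on your cluster, and the log path shown is relative to your installation directory.

# Confirm the identity that the exported kubeconfig authenticates as
oc whoami

# All nodes should eventually report a Ready status
oc get nodes

# All cluster Operators should report Available=True and Degraded=False
oc get clusteroperators

# Review the installation log if anything looks wrong
cat <installation_directory>/.openshift_install.log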
[ "ssh-keygen -t ed25519 -N '' -f <path>/<file_name> 1", "cat <path>/<file_name>.pub", "cat ~/.ssh/id_ed25519.pub", "eval \"USD(ssh-agent -s)\"", "Agent pid 31874", "ssh-add <path>/<file_name> 1", "Identity added: /home/<you>/<path>/<file_name> (<computer_name>)", "apiVersion: v1 baseDomain: example.com compute: - hyperthreading: Enabled name: worker platform: aws: amiID: ami-06c4d345f7c207239 1 type: m5.4xlarge replicas: 3 metadata: name: test-cluster platform: aws: region: us-east-2 2 sshKey: ssh-ed25519 AAAA pullSecret: '{\"auths\": ...}'", "tar -xvf openshift-install-linux.tar.gz", "mkdir <installation_directory>", "apiVersion: v1 baseDomain: example.com 1 credentialsMode: Mint 2 controlPlane: 3 4 hyperthreading: Enabled 5 name: master platform: aws: zones: - us-gov-west-1a - us-gov-west-1b rootVolume: iops: 4000 size: 500 type: io1 6 metadataService: authentication: Optional 7 type: m6i.xlarge replicas: 3 compute: 8 - hyperthreading: Enabled 9 name: worker platform: aws: rootVolume: iops: 2000 size: 500 type: io1 10 metadataService: authentication: Optional 11 type: c5.4xlarge zones: - us-gov-west-1c replicas: 3 metadata: name: test-cluster 12 networking: clusterNetwork: - cidr: 10.128.0.0/14 hostPrefix: 23 machineNetwork: - cidr: 10.0.0.0/16 networkType: OVNKubernetes 13 serviceNetwork: - 172.30.0.0/16 platform: aws: region: us-gov-west-1 14 propagateUserTags: true 15 userTags: adminContact: jdoe costCenter: 7536 subnets: 16 - subnet-1 - subnet-2 - subnet-3 amiID: ami-0c5d3e03c0ab9b19a 17 serviceEndpoints: 18 - name: ec2 url: https://vpce-id.ec2.us-west-2.vpce.amazonaws.com hostedZone: Z3URY6TWQ91KVV 19 fips: false 20 sshKey: ssh-ed25519 AAAA... 21 publish: Internal 22 pullSecret: '{\"auths\": ...}' 23", "apiVersion: v1 baseDomain: my.domain.com proxy: httpProxy: http://<username>:<pswd>@<ip>:<port> 1 httpsProxy: https://<username>:<pswd>@<ip>:<port> 2 noProxy: ec2.<aws_region>.amazonaws.com,elasticloadbalancing.<aws_region>.amazonaws.com,s3.<aws_region>.amazonaws.com 3 additionalTrustBundle: | 4 -----BEGIN CERTIFICATE----- <MY_TRUSTED_CA_CERT> -----END CERTIFICATE----- additionalTrustBundlePolicy: <policy_to_add_additionalTrustBundle> 5", "./openshift-install wait-for install-complete --log-level debug", "compute: - hyperthreading: Enabled name: worker platform: aws: additionalSecurityGroupIDs: - sg-1 1 - sg-2 replicas: 3 controlPlane: hyperthreading: Enabled name: master platform: aws: additionalSecurityGroupIDs: - sg-3 - sg-4 replicas: 3 platform: aws: region: us-east-1 subnets: 2 - subnet-1 - subnet-2 - subnet-3", "tar xvf <file>", "echo USDPATH", "oc <command>", "C:\\> path", "C:\\> oc <command>", "echo USDPATH", "oc <command>", "apiVersion: v1 baseDomain: example.com credentialsMode: Manual", "openshift-install create manifests --dir <installation_directory>", "RELEASE_IMAGE=USD(./openshift-install version | awk '/release image/ {print USD3}')", "oc adm release extract --from=USDRELEASE_IMAGE --credentials-requests --included \\ 1 --install-config=<path_to_directory_with_installation_configuration>/install-config.yaml \\ 2 --to=<path_to_directory_for_credentials_requests> 3", "apiVersion: cloudcredential.openshift.io/v1 kind: CredentialsRequest metadata: name: <component_credentials_request> namespace: openshift-cloud-credential-operator spec: providerSpec: apiVersion: cloudcredential.openshift.io/v1 kind: AWSProviderSpec statementEntries: - effect: Allow action: - iam:GetUser - iam:GetUserPolicy - iam:ListAccessKeys resource: \"*\"", "apiVersion: 
cloudcredential.openshift.io/v1 kind: CredentialsRequest metadata: name: <component_credentials_request> namespace: openshift-cloud-credential-operator spec: providerSpec: apiVersion: cloudcredential.openshift.io/v1 kind: AWSProviderSpec statementEntries: - effect: Allow action: - s3:CreateBucket - s3:DeleteBucket resource: \"*\" secretRef: name: <component_secret> namespace: <component_namespace>", "apiVersion: v1 kind: Secret metadata: name: <component_secret> namespace: <component_namespace> data: aws_access_key_id: <base64_encoded_aws_access_key_id> aws_secret_access_key: <base64_encoded_aws_secret_access_key>", "RELEASE_IMAGE=USD(./openshift-install version | awk '/release image/ {print USD3}')", "CCO_IMAGE=USD(oc adm release info --image-for='cloud-credential-operator' USDRELEASE_IMAGE -a ~/.pull-secret)", "oc image extract USDCCO_IMAGE --file=\"/usr/bin/ccoctl\" -a ~/.pull-secret", "chmod 775 ccoctl", "./ccoctl.rhel9", "OpenShift credentials provisioning tool Usage: ccoctl [command] Available Commands: alibabacloud Manage credentials objects for alibaba cloud aws Manage credentials objects for AWS cloud azure Manage credentials objects for Azure gcp Manage credentials objects for Google cloud help Help about any command ibmcloud Manage credentials objects for IBM Cloud nutanix Manage credentials objects for Nutanix Flags: -h, --help help for ccoctl Use \"ccoctl [command] --help\" for more information about a command.", "RELEASE_IMAGE=USD(./openshift-install version | awk '/release image/ {print USD3}')", "oc adm release extract --from=USDRELEASE_IMAGE --credentials-requests --included \\ 1 --install-config=<path_to_directory_with_installation_configuration>/install-config.yaml \\ 2 --to=<path_to_directory_for_credentials_requests> 3", "ccoctl aws create-all --name=<name> \\ 1 --region=<aws_region> \\ 2 --credentials-requests-dir=<path_to_credentials_requests_directory> \\ 3 --output-dir=<path_to_ccoctl_output_dir> \\ 4 --create-private-s3-bucket 5", "ls <path_to_ccoctl_output_dir>/manifests", "cluster-authentication-02-config.yaml openshift-cloud-credential-operator-cloud-credential-operator-iam-ro-creds-credentials.yaml openshift-cloud-network-config-controller-cloud-credentials-credentials.yaml openshift-cluster-api-capa-manager-bootstrap-credentials-credentials.yaml openshift-cluster-csi-drivers-ebs-cloud-credentials-credentials.yaml openshift-image-registry-installer-cloud-credentials-credentials.yaml openshift-ingress-operator-cloud-credentials-credentials.yaml openshift-machine-api-aws-cloud-credentials-credentials.yaml", "ccoctl aws create-key-pair", "2021/04/13 11:01:02 Generating RSA keypair 2021/04/13 11:01:03 Writing private key to /<path_to_ccoctl_output_dir>/serviceaccount-signer.private 2021/04/13 11:01:03 Writing public key to /<path_to_ccoctl_output_dir>/serviceaccount-signer.public 2021/04/13 11:01:03 Copying signing key for use by installer", "ccoctl aws create-identity-provider --name=<name> \\ 1 --region=<aws_region> \\ 2 --public-key-file=<path_to_ccoctl_output_dir>/serviceaccount-signer.public 3", "2021/04/13 11:16:09 Bucket <name>-oidc created 2021/04/13 11:16:10 OpenID Connect discovery document in the S3 bucket <name>-oidc at .well-known/openid-configuration updated 2021/04/13 11:16:10 Reading public key 2021/04/13 11:16:10 JSON web key set (JWKS) in the S3 bucket <name>-oidc at keys.json updated 2021/04/13 11:16:18 Identity Provider created with ARN: arn:aws:iam::<aws_account_id>:oidc-provider/<name>-oidc.s3.<aws_region>.amazonaws.com", 
"RELEASE_IMAGE=USD(./openshift-install version | awk '/release image/ {print USD3}')", "oc adm release extract --from=USDRELEASE_IMAGE --credentials-requests --included \\ 1 --install-config=<path_to_directory_with_installation_configuration>/install-config.yaml \\ 2 --to=<path_to_directory_for_credentials_requests> 3", "ccoctl aws create-iam-roles --name=<name> --region=<aws_region> --credentials-requests-dir=<path_to_credentials_requests_directory> --identity-provider-arn=arn:aws:iam::<aws_account_id>:oidc-provider/<name>-oidc.s3.<aws_region>.amazonaws.com", "ls <path_to_ccoctl_output_dir>/manifests", "cluster-authentication-02-config.yaml openshift-cloud-credential-operator-cloud-credential-operator-iam-ro-creds-credentials.yaml openshift-cloud-network-config-controller-cloud-credentials-credentials.yaml openshift-cluster-api-capa-manager-bootstrap-credentials-credentials.yaml openshift-cluster-csi-drivers-ebs-cloud-credentials-credentials.yaml openshift-image-registry-installer-cloud-credentials-credentials.yaml openshift-ingress-operator-cloud-credentials-credentials.yaml openshift-machine-api-aws-cloud-credentials-credentials.yaml", "apiVersion: v1 baseDomain: example.com credentialsMode: Manual", "openshift-install create manifests --dir <installation_directory>", "cp /<path_to_ccoctl_output_dir>/manifests/* ./manifests/", "cp -a /<path_to_ccoctl_output_dir>/tls .", "./openshift-install create cluster --dir <installation_directory> \\ 1 --log-level=info 2", "INFO Install complete! INFO To access the cluster as the system:admin user when using 'oc', run 'export KUBECONFIG=/home/myuser/install_dir/auth/kubeconfig' INFO Access the OpenShift web-console here: https://console-openshift-console.apps.mycluster.example.com INFO Login to the console with user: \"kubeadmin\", and password: \"password\" INFO Time elapsed: 36m22s", "export KUBECONFIG=<installation_directory>/auth/kubeconfig 1", "oc whoami", "system:admin", "cat <installation_directory>/auth/kubeadmin-password", "oc get routes -n openshift-console | grep 'console-openshift'", "console console-openshift-console.apps.<cluster_name>.<base_domain> console https reencrypt/Redirect None" ]
https://docs.redhat.com/en/documentation/openshift_container_platform/4.14/html/installing_on_aws/installing-aws-government-region
Chapter 11. Networks
Chapter 11. Networks 11.1. Network Elements The networks collection provides information about the logical networks in a Red Hat Virtualization environment. An API user accesses this information through the rel="networks" link obtained from the entry point URI. The following table shows specific elements contained in a network resource representation. Table 11.1. Network elements Element Type Description Properties link rel="vnicprofiles" relationship A link to the sub-collection for VNIC profiles attached to this logical network. link rel="labels" relationship A link to the sub-collection for labels attached to this logical network. data_center id= GUID A reference to the data center of which this network is a member. vlan id= integer A VLAN tag. stp Boolean: true or false true if Spanning Tree Protocol is enabled on this network. mtu integer Sets the maximum transmission unit for the logical network. If omitted, the logical network uses the default value. status One of operational or non_operational The status of the network. These states are listed in network_states under capabilities . usages complex Defines a set of usage elements for the network. Users can define networks as VM networks at this level. Important The API as documented in this section is experimental and subject to change. It is not covered by the backwards compatibility statement.
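As an illustration of how an API user might follow the rel="networks" link described above, here is a minimal sketch using curl. The hostname, credentials, and CA certificate path are placeholders, and the /api entry point path is an assumption based on version 3 REST API conventions; adjust all of them for your environment.

# Request the API entry point and locate the link with rel="networks"
curl --cacert ca.crt --user 'admin@internal:password' --header 'Accept: application/xml' https://rhevm.example.com/api

# Follow the rel="networks" link to list the logical networks
curl --cacert ca.crt --user 'admin@internal:password' --header 'Accept: application/xml' https://rhevm.example.com/api/networks

# Each <network> element in the response carries the fields from Table 11.1,
# such as data_center, vlan, stp, mtu, status, and usages.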
null
https://docs.redhat.com/en/documentation/red_hat_virtualization/4.3/html/version_3_rest_api_guide/chap-networks
Chapter 14. Adding custom SSL/TLS certificates when Red Hat Quay is deployed on Kubernetes
Chapter 14. Adding custom SSL/TLS certificates when Red Hat Quay is deployed on Kubernetes When deployed on Kubernetes, Red Hat Quay mounts a secret as a volume to store config assets. Currently, this breaks the upload certificate function of the superuser panel. As a temporary workaround, base64 encoded certificates can be added to the secret after Red Hat Quay has been deployed. Use the following procedure to add custom SSL/TLS certificates when Red Hat Quay is deployed on Kubernetes. Prerequisites Red Hat Quay has been deployed. You have a custom ca.crt file. Procedure Base64 encode the contents of an SSL/TLS certificate by entering the following command: USD cat ca.crt | base64 -w 0 Example output ...c1psWGpqeGlPQmNEWkJPMjJ5d0pDemVnR2QNCnRsbW9JdEF4YnFSdVd3PT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= Enter the following kubectl command to edit the quay-enterprise-config-secret file: USD kubectl --namespace quay-enterprise edit secret/quay-enterprise-config-secret Add an entry for the certificate and paste the full base64 encoded string under the entry. For example: custom-cert.crt: c1psWGpqeGlPQmNEWkJPMjJ5d0pDemVnR2QNCnRsbW9JdEF4YnFSdVd3PT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= Use the kubectl delete command to remove all Red Hat Quay pods. For example: USD kubectl delete pod quay-operator.v3.7.1-6f9d859bd-p5ftc quayregistry-clair-postgres-7487f5bd86-xnxpr quayregistry-quay-app-upgrade-xq2v6 quayregistry-quay-config-editor-6dfdcfc44f-hlvwm quayregistry-quay-database-859d5445ff-cqthr quayregistry-quay-redis-84f888776f-hhgms Afterwards, the Red Hat Quay deployment automatically schedules replacement pods with the new certificate data.
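As a follow-up to the procedure above, the following sketch shows one way to confirm that the certificate entry was stored and that the pods were rescheduled. The namespace and secret name match the example commands in this chapter, and custom-cert.crt is the illustrative key name used above; adjust them if your deployment differs.

# Decode the certificate entry back out of the secret to confirm it was stored correctly
kubectl --namespace quay-enterprise get secret quay-enterprise-config-secret -o jsonpath='{.data.custom-cert\.crt}' | base64 -d

# Confirm that the replacement pods have been scheduled and are running
kubectl --namespace quay-enterprise get pods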
[ "cat ca.crt | base64 -w 0", "...c1psWGpqeGlPQmNEWkJPMjJ5d0pDemVnR2QNCnRsbW9JdEF4YnFSdVd3PT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=", "kubectl --namespace quay-enterprise edit secret/quay-enterprise-config-secret", "custom-cert.crt: c1psWGpqeGlPQmNEWkJPMjJ5d0pDemVnR2QNCnRsbW9JdEF4YnFSdVd3PT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=", "kubectl delete pod quay-operator.v3.7.1-6f9d859bd-p5ftc quayregistry-clair-postgres-7487f5bd86-xnxpr quayregistry-quay-app-upgrade-xq2v6 quayregistry-quay-config-editor-6dfdcfc44f-hlvwm quayregistry-quay-database-859d5445ff-cqthr quayregistry-quay-redis-84f888776f-hhgms" ]
https://docs.redhat.com/en/documentation/red_hat_quay/3.9/html/red_hat_quay_operator_features/config-custom-ssl-cert-kubernetes
Chapter 18. Azure Storage Queue Source
Chapter 18. Azure Storage Queue Source Receive Messages from Azure Storage queues. Important The Azure Storage Queue Source Kamelet is a Technology Preview feature only. Technology Preview features are not supported with Red Hat production service level agreements (SLAs) and might not be functionally complete. Red Hat does not recommend using them in production. These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process. For more information about the support scope of Red Hat Technology Preview features, see https://access.redhat.com/support/offerings/techpreview . 18.1. Configuration Options The following table summarizes the configuration options available for the azure-storage-queue-source Kamelet: Property Name Description Type Default Example accessKey * Access Key The Azure Storage Queue access Key. string accountName * Account Name The Azure Storage Queue account name. string queueName * Queue Name The Azure Storage Queue container name. string maxMessages Maximum Messages Maximum number of messages to get, if there are less messages exist in the queue than requested all the messages will be returned. By default it will consider 1 message to be retrieved, the allowed range is 1 to 32 messages. int 1 Note Fields marked with an asterisk (*) are mandatory. 18.2. Dependencies At runtime, the azure-storage-queue-source Kamelet relies upon the presence of the following dependencies: camel:azure-storage-queue camel:kamelet 18.3. Usage This section describes how you can use the azure-storage-queue-source . 18.3.1. Knative Source You can use the azure-storage-queue-source Kamelet as a Knative source by binding it to a Knative object. azure-storage-queue-source-binding.yaml apiVersion: camel.apache.org/v1alpha1 kind: KameletBinding metadata: name: azure-storage-queue-source-binding spec: source: ref: kind: Kamelet apiVersion: camel.apache.org/v1alpha1 name: azure-storage-queue-source properties: accessKey: "The Access Key" accountName: "The Account Name" queueName: "The Queue Name" sink: ref: kind: Channel apiVersion: messaging.knative.dev/v1 name: mychannel 18.3.1.1. Prerequisite Make sure you have "Red Hat Integration - Camel K" installed into the OpenShift cluster you're connected to. 18.3.1.2. Procedure for using the cluster CLI Save the azure-storage-queue-source-binding.yaml file to your local drive, and then edit it as needed for your configuration. Run the source by using the following command: oc apply -f azure-storage-queue-source-binding.yaml 18.3.1.3. Procedure for using the Kamel CLI Configure and run the source by using the following command: kamel bind azure-storage-queue-source -p "source.accessKey=The Access Key" -p "source.accountName=The Account Name" -p "source.queueName=The Queue Name" channel:mychannel This command creates the KameletBinding in the current namespace on the cluster. 18.3.2. Kafka Source You can use the azure-storage-queue-source Kamelet as a Kafka source by binding it to a Kafka topic. azure-storage-queue-source-binding.yaml apiVersion: camel.apache.org/v1alpha1 kind: KameletBinding metadata: name: azure-storage-queue-source-binding spec: source: ref: kind: Kamelet apiVersion: camel.apache.org/v1alpha1 name: azure-storage-queue-source properties: accessKey: "The Access Key" accountName: "The Account Name" queueName: "The Queue Name" sink: ref: kind: KafkaTopic apiVersion: kafka.strimzi.io/v1beta1 name: my-topic 18.3.2.1. 
Prerequisites Ensure that you've installed the AMQ Streams operator in your OpenShift cluster and created a topic named my-topic in the current namespace. Make also sure you have "Red Hat Integration - Camel K" installed into the OpenShift cluster you're connected to. 18.3.2.2. Procedure for using the cluster CLI Save the azure-storage-queue-source-binding.yaml file to your local drive, and then edit it as needed for your configuration. Run the source by using the following command: oc apply -f azure-storage-queue-source-binding.yaml 18.3.2.3. Procedure for using the Kamel CLI Configure and run the source by using the following command: kamel bind azure-storage-queue-source -p "source.accessKey=The Access Key" -p "source.accountName=The Account Name" -p "source.queueName=The Queue Name" kafka.strimzi.io/v1beta1:KafkaTopic:my-topic This command creates the KameletBinding in the current namespace on the cluster. 18.4. Kamelet source file https://github.com/openshift-integration/kamelet-catalog/azure-storage-queue-source.kamelet.yaml
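The optional maxMessages property from the configuration table can be supplied in the same way as the required source properties. The following variant of the Kamel CLI command is only a sketch that extends the Knative example above; the value 5 is an arbitrary illustration within the documented range of 1 to 32:
kamel bind azure-storage-queue-source -p "source.accessKey=The Access Key" -p "source.accountName=The Account Name" -p "source.queueName=The Queue Name" -p "source.maxMessages=5" channel:mychannel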
[ "apiVersion: camel.apache.org/v1alpha1 kind: KameletBinding metadata: name: azure-storage-queue-source-binding spec: source: ref: kind: Kamelet apiVersion: camel.apache.org/v1alpha1 name: azure-storage-queue-source properties: accessKey: \"The Access Key\" accountName: \"The Account Name\" queueName: \"The Queue Name\" sink: ref: kind: Channel apiVersion: messaging.knative.dev/v1 name: mychannel", "apply -f azure-storage-queue-source-binding.yaml", "kamel bind azure-storage-queue-source -p \"source.accessKey=The Access Key\" -p \"source.accountName=The Account Name\" -p \"source.queueName=The Queue Name\" channel:mychannel", "apiVersion: camel.apache.org/v1alpha1 kind: KameletBinding metadata: name: azure-storage-queue-source-binding spec: source: ref: kind: Kamelet apiVersion: camel.apache.org/v1alpha1 name: azure-storage-queue-source properties: accessKey: \"The Access Key\" accountName: \"The Account Name\" queueName: \"The Queue Name\" sink: ref: kind: KafkaTopic apiVersion: kafka.strimzi.io/v1beta1 name: my-topic", "apply -f azure-storage-queue-source-binding.yaml", "kamel bind azure-storage-queue-source -p \"source.accessKey=The Access Key\" -p \"source.accountName=The Account Name\" -p \"source.queueName=The Queue Name\" kafka.strimzi.io/v1beta1:KafkaTopic:my-topic" ]
https://docs.redhat.com/en/documentation/red_hat_build_of_apache_camel_k/1.10.9/html/kamelets_reference/azure_storage_queue_source
Chapter 3. Migrating data between cache stores
Chapter 3. Migrating data between cache stores Data Grid provides a Java utility for migrating persistent data between cache stores. In the case of upgrading Data Grid, functional differences between major versions do not allow backwards compatibility between cache stores. You can use StoreMigrator to convert your data so that it is compatible with the target version. For example, upgrading to Data Grid 8.0 changes the default marshaller to Protostream. In earlier Data Grid versions, cache stores use a binary format that is not compatible with the changes to marshalling. This means that Data Grid 8.0 cannot read from cache stores created with earlier Data Grid versions. In other cases, later Data Grid versions deprecate or remove cache store implementations, such as JDBC Mixed and Binary stores. You can use StoreMigrator in these cases to convert to different cache store implementations. 3.1. Cache store migrator Data Grid provides the CLI migrate store command that recreates data for the latest Data Grid cache store implementations. The store migrator takes a cache store from an earlier version of Data Grid as the source and uses a current cache store implementation as the target. When you run the store migrator, it creates the target cache with the cache store type that you define using the EmbeddedCacheManager interface. The store migrator then loads entries from the source store into memory and then puts them into the target cache. The store migrator also lets you migrate data from one type of cache store to another. For example, you can migrate from a JDBC string-based cache store to a SIFS cache store. Important The store migrator cannot migrate data from segmented cache stores to: Non-segmented cache stores. Segmented cache stores that have a different number of segments. 3.2. Configuring the cache store migrator Use the migrator.properties file to configure properties for source and target cache stores. Procedure Create a migrator.properties file. Configure properties for source and target cache store using the migrator.properties file. Add the source. prefix to all configuration properties for the source cache store. Example source cache store Important For migrating data from segmented cache stores, you must also configure the number of segments using the source.segment_count property. The number of segments must match clustering.hash.numSegments in your Data Grid configuration. If the number of segments for a cache store does not match the number of segments for the corresponding cache, Data Grid cannot read data from the cache store. Add the target. prefix to all configuration properties for the target cache store. Example target cache store 3.2.1. Configuration properties for the cache store migrator Configure source and target cache stores in a StoreMigrator properties file. Table 3.1. Cache Store Type Property Property Description Required/Optional type Specifies the type of cache store for a source or target cache store. .type=JDBC_STRING .type=JDBC_BINARY .type=JDBC_MIXED .type=LEVELDB .type=ROCKSDB .type=SINGLE_FILE_STORE .type=SOFT_INDEX_FILE_STORE Required Table 3.2. Common Properties Property Description Example Value Required/Optional cache_name The name of the cache that you want to back up. .cache_name=myCache Required segment_count The number of segments for target cache stores that can use segmentation. The number of segments must match clustering.hash.numSegments in the Data Grid configuration.
If the number of segments for a cache store does not match the number of segments for the corresponding cache, Data Grid cannot read data from the cache store. .segment_count=256 Optional marshaller.class Specifies a custom marshaller class. Required if using custom marshallers. marshaller.allow-list.classes Specifies a comma-separated list of fully qualified class names that are allowed to be deserialized. Optional marshaller.allow-list.regexps Specifies a comma-separated list of regular expressions that determine which classes are allowed be deserialized. Optional marshaller.externalizers Specifies a comma-separated list of custom AdvancedExternalizer implementations to load in this format: [id]:<Externalizer class> Optional Table 3.3. JDBC Properties Property Description Required/Optional dialect Specifies the dialect of the underlying database. Required version Specifies the marshaller version for source cache stores. Set one of the following values: * 8 for Data Grid 7.2.x * 9 for Data Grid 7.3.x * 10 for Data Grid 8.0.x * 11 for Data Grid 8.1.x * 12 for Data Grid 8.2.x * 13 for Data Grid 8.3.x Required for source stores only. connection_pool.connection_url Specifies the JDBC connection URL. Required connection_pool.driver_class Specifies the class of the JDBC driver. Required connection_pool.username Specifies a database username. Required connection_pool.password Specifies a password for the database username. Required db.disable_upsert Disables database upsert. Optional db.disable_indexing Specifies if table indexes are created. Optional table.string.table_name_prefix Specifies additional prefixes for the table name. Optional table.string.<id|data|timestamp>.name Specifies the column name. Required table.string.<id|data|timestamp>.type Specifies the column type. Required key_to_string_mapper Specifies the TwoWayKey2StringMapper class. Optional Note To migrate from Binary cache stores in older Data Grid versions, change table.string.* to table.binary.\* in the following properties: source.table.binary.table_name_prefix source.table.binary.<id\|data\|timestamp>.name source.table.binary.<id\|data\|timestamp>.type Table 3.4. RocksDB Properties Property Description Required/Optional location Sets the database directory. Required compression Specifies the compression type to use. Optional Table 3.5. SingleFileStore Properties Property Description Required/Optional location Sets the directory that contains the cache store .dat file. Required Table 3.6. SoftIndexFileStore Properties Property Description Value Required/Optional location Sets the database directory. Required index_location Sets the database index directory. 3.3. Migrating Data Grid cache stores Run the store migrator to migrate data from one cache store to another. Prerequisites Get the Data Grid CLI. Create a migrator.properties file that configures the source and target cache stores. Procedure Run the migrate store -p /path/to/migrator.properties CLI command
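Putting these properties together, a complete migrator.properties for the JDBC string-based to SIFS migration mentioned in Section 3.1 might look like the following sketch. The connection details, table column names, paths, version, and segment count are placeholders that you must replace with values from your own environment, and the type tokens must match Table 3.1:
source.type=JDBC_STRING
source.cache_name=myCache
source.version=11
source.dialect=POSTGRES
source.connection_pool.connection_url=jdbc:postgresql:postgres
source.connection_pool.driver_class=org.postgresql.Driver
source.connection_pool.username=postgres
source.connection_pool.password=changeme
source.table.string.id.name=id_column
source.table.string.id.type=VARCHAR
source.table.string.data.name=datum_column
source.table.string.data.type=bytea
source.table.string.timestamp.name=timestamp_column
source.table.string.timestamp.type=BIGINT
target.type=SOFT_INDEX_FILE_STORE
target.cache_name=myCache
target.segment_count=256
target.location=/path/to/target/sifs/data
target.index_location=/path/to/target/sifs/index
You would then run migrate store -p /path/to/migrator.properties from the Data Grid CLI as described above.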
[ "source.type=SOFT_INDEX_FILE_STORE source.cache_name=myCache source.location=/path/to/source/sifs source.version=<version>", "target.type=SINGLE_FILE_STORE target.cache_name=myCache target.location=/path/to/target/sfs.dat", "Example configuration for migrating to a JDBC String-Based cache store target.type=STRING target.cache_name=myCache target.dialect=POSTGRES target.marshaller.class=org.infinispan.commons.marshall.JavaSerializationMarshaller target.marshaller.allow-list.classes=org.example.Person,org.example.Animal target.marshaller.allow-list.regexps=\"org.another.example.*\" target.marshaller.externalizers=25:Externalizer1,org.example.Externalizer2 target.connection_pool.connection_url=jdbc:postgresql:postgres target.connection_pool.driver_class=org.postrgesql.Driver target.connection_pool.username=postgres target.connection_pool.password=redhat target.db.disable_upsert=false target.db.disable_indexing=false target.table.string.table_name_prefix=tablePrefix target.table.string.id.name=id_column target.table.string.data.name=datum_column target.table.string.timestamp.name=timestamp_column target.table.string.id.type=VARCHAR target.table.string.data.type=bytea target.table.string.timestamp.type=BIGINT target.key_to_string_mapper=org.infinispan.persistence.keymappers. DefaultTwoWayKey2StringMapper", "Example configuration for migrating from a RocksDB cache store. source.type=ROCKSDB source.cache_name=myCache source.location=/path/to/rocksdb/database source.compression=SNAPPY", "Example configuration for migrating to a Single File cache store. target.type=SINGLE_FILE_STORE target.cache_name=myCache target.location=/path/to/sfs.dat", "Example configuration for migrating to a Soft-Index File cache store. target.type=SOFT_INDEX_FILE_STORE target.cache_name=myCache target.location=path/to/sifs/database target.location=path/to/sifs/index" ]
https://docs.redhat.com/en/documentation/red_hat_data_grid/8.5/html/upgrading_data_grid/migrating-data-between-stores
Appendix B. Cluster Creation in Red Hat Enterprise Linux 6 and Red Hat Enterprise Linux 7
Appendix B. Cluster Creation in Red Hat Enterprise Linux 6 and Red Hat Enterprise Linux 7 Configuring a Red Hat High Availability Cluster in Red Hat Enterprise Linux 7 with Pacemaker requires a different set of configuration tools with a different administrative interface than configuring a cluster in Red Hat Enterprise Linux 6 with rgmanager . Section B.1, "Cluster Creation with rgmanager and with Pacemaker" summarizes the configuration differences between the various cluster components. Red Hat Enterprise Linux 6.5 and later releases support cluster configuration with Pacemaker, using the pcs configuration tool. Section B.2, "Pacemaker Installation in Red Hat Enterprise Linux 6 and Red Hat Enterprise Linux 7" summarizes the Pacemaker installation differences between Red Hat Enterprise Linux 6 and Red Hat Enterprise Linux 7. B.1. Cluster Creation with rgmanager and with Pacemaker Table B.1, "Comparison of Cluster Configuration with rgmanager and with Pacemaker" provides a comparative summary of how you configure the components of a cluster with rgmanager in Red Hat Enterprise Linux 6 and with Pacemaker in Red Hat Enterprise Linux 7. Table B.1. Comparison of Cluster Configuration with rgmanager and with Pacemaker Configuration Component rgmanager Pacemaker Cluster configuration file The cluster configuration file on each node is the cluster.conf file, which can be edited directly. Otherwise, use the luci or ccs interface to define the cluster configuration. The cluster and Pacemaker configuration files are corosync.conf and cib.xml . Do not edit the cib.xml file directly; use the pcs or pcsd interface instead. Network setup Configure IP addresses and SSH before configuring the cluster. Configure IP addresses and SSH before configuring the cluster. Cluster Configuration Tools luci , ccs command, manual editing of cluster.conf file. pcs or pcsd . Installation Install rgmanager (which pulls in all dependencies, including ricci , luci , and the resource and fencing agents). If needed, install lvm2-cluster and gfs2-utils . Install pcs , and the fencing agents you require. If needed, install lvm2-cluster and gfs2-utils . Starting cluster services Start and enable cluster services with the following procedure: Start rgmanager , cman , and, if needed, clvmd and gfs2 . Start ricci , and start luci if using the luci interface. Run chkconfig on for the needed services so that they start at boot time. Alternately, you can enter ccs --start to start and enable the cluster services. Start and enable cluster services with the following procedure: On every node, execute systemctl start pcsd.service , then systemctl enable pcsd.service to enable pcsd to start at boot time. On one node in the cluster, enter pcs cluster start --all to start corosync and pacemaker . Controlling access to configuration tools For luci , the root user or a user with luci permissions can access luci . All access requires the ricci password for the node. The pcsd gui requires that you authenticate as user hacluster , which is the common system user. The root user can set the password for hacluster . Cluster creation Name the cluster and define which nodes to include in the cluster with luci or ccs , or directly edit the cluster.conf file. Name the cluster and include nodes with the pcs cluster setup command or with the pcsd Web UI. You can add nodes to an existing cluster with the pcs cluster node add command or with the pcsd Web UI.
Propagating cluster configuration to all nodes When configuring a cluster with luci , propagation is automatic. With ccs , use the --sync option. You can also use the cman_tool version -r command. Propagation of the cluster and Pacemaker configuration files, corosync.conf and cib.xml , is automatic on cluster setup or when adding a node or resource. Global cluster properties The following features are supported with rgmanager in Red Hat Enterprise Linux 6: * You can configure the system so that the system chooses which multicast address to use for IP multicasting in the cluster network. * If IP multicasting is not available, you can use the UDP Unicast transport mechanism. * You can configure a cluster to use RRP protocol. Pacemaker in Red Hat Enterprise Linux 7 supports the following features for a cluster: * You can set no-quorum-policy for the cluster to specify what the system should do when the cluster does not have quorum. * For additional cluster properties you can set, see Table 12.1, "Cluster Properties" . Logging You can set global and daemon-specific logging configuration. See the file /etc/sysconfig/pacemaker for information on how to configure logging manually. Validating the cluster Cluster validation is automatic with luci and with ccs , using the cluster schema. The cluster is automatically validated on startup. The cluster is automatically validated on startup, or you can validate the cluster with pcs cluster verify . Quorum in two-node clusters With a two-node cluster, you can configure how the system determines quorum: * Configure a quorum disk * Use ccs or edit the cluster.conf file to set two_node=1 and expected_votes=1 to allow a single node to maintain quorum. pcs automatically adds the necessary options for a two-node cluster to corosync . Cluster status On luci , the current status of the cluster is visible in the various components of the interface, which can be refreshed. You can use the --getconf option of the ccs command to see the current configuration file. You can use the clustat command to display cluster status. You can display the current cluster status with the pcs status command. Resources You add resources of defined types and configure resource-specific properties with luci or the ccs command, or by editing the cluster.conf configuration file. You add resources of defined types and configure resource-specific properties with the pcs resource create command or with the pcsd Web UI. For general information on configuring cluster resources with Pacemaker see Chapter 6, Configuring Cluster Resources . Resource behavior, grouping, and start/stop order Define cluster services to configure how resources interact. With Pacemaker, you use resource groups as a shorthand method of defining a set of resources that need to be located together and started and stopped sequentially. In addition, you define how resources behave and interact in the following ways: * You set some aspects of resource behavior as resource options. * You use location constraints to determine which nodes a resource can run on. * You use order constraints to determine the order in which resources run. * You use colocation constraints to determine that the location of one resource depends on the location of another resource. For more complete information on these topics, see Chapter 6, Configuring Cluster Resources and Chapter 7, Resource Constraints .
Resource administration: Moving, starting, stopping resources With luci , you can manage clusters, individual cluster nodes, and cluster services. With the ccs command, you can manage clusters. You can use the clusvcadm command to manage cluster services. You can temporarily disable a node so that it cannot host resources with the pcs cluster standby command, which causes the resources to migrate. You can stop a resource with the pcs resource disable command. Removing a cluster configuration completely With luci , you can select all nodes in a cluster for deletion to delete a cluster entirely. You can also remove the cluster.conf file from each node in the cluster. You can remove a cluster configuration with the pcs cluster destroy command. Resources active on multiple nodes, resources active on multiple nodes in multiple modes No equivalent. With Pacemaker, you can clone resources so that they can run on multiple nodes, and you can define cloned resources as master and slave resources so that they can run in multiple modes. For information on cloned resources and master/slave resources, see Chapter 9, Advanced Configuration . Fencing -- single fence device per node Create fencing devices globally or locally and add them to nodes. You can define post-fail delay and post-join delay values for the cluster as a whole. Create a fencing device for each node with the pcs stonith create command or with the pcsd Web UI. For devices that can fence multiple nodes, you need to define them only once rather than separately for each node. You can also define pcmk_host_map to configure fencing devices for all nodes with a single command; for information on pcmk_host_map see Table 5.1, "General Properties of Fencing Devices" . You can define the stonith-timeout value for the cluster as a whole. Multiple (backup) fencing devices per node Define backup devices with luci or the ccs command, or by editing the cluster.conf file directly. Configure fencing levels.
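As a concrete illustration of the Pacemaker column in Table B.1, the basic Red Hat Enterprise Linux 7 workflow can be strung together as follows. The cluster name and node names are placeholders, and depending on your pcs version you may first need to authenticate the nodes with pcs cluster auth:
systemctl start pcsd.service
systemctl enable pcsd.service
pcs cluster setup --name my_cluster node1.example.com node2.example.com
pcs cluster start --all
pcs status
The first two commands run on every node; the pcs cluster setup, pcs cluster start --all, and pcs status commands run on one node, as described in the table.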
null
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/7/html/high_availability_add-on_reference/ap-ha-rhel6-rhel7-HAAR
Chapter 3. Configuring IAM for IBM Cloud
Chapter 3. Configuring IAM for IBM Cloud In environments where the cloud identity and access management (IAM) APIs are not reachable, you must put the Cloud Credential Operator (CCO) into manual mode before you install the cluster. 3.1. Alternatives to storing administrator-level secrets in the kube-system project The Cloud Credential Operator (CCO) manages cloud provider credentials as Kubernetes custom resource definitions (CRDs). You can configure the CCO to suit the security requirements of your organization by setting different values for the credentialsMode parameter in the install-config.yaml file. Storing an administrator-level credential secret in the cluster kube-system project is not supported for IBM Cloud(R); therefore, you must set the credentialsMode parameter for the CCO to Manual when installing OpenShift Container Platform and manage your cloud credentials manually. Using manual mode allows each cluster component to have only the permissions it requires, without storing an administrator-level credential in the cluster. You can also use this mode if your environment does not have connectivity to the cloud provider public IAM endpoint. However, you must manually reconcile permissions with new release images for every upgrade. You must also manually supply credentials for every component that requests them. Additional resources About the Cloud Credential Operator 3.2. Configuring the Cloud Credential Operator utility To create and manage cloud credentials from outside of the cluster when the Cloud Credential Operator (CCO) is operating in manual mode, extract and prepare the CCO utility ( ccoctl ) binary. Note The ccoctl utility is a Linux binary that must run in a Linux environment. Prerequisites You have access to an OpenShift Container Platform account with cluster administrator access. You have installed the OpenShift CLI ( oc ). Procedure Set a variable for the OpenShift Container Platform release image by running the following command: USD RELEASE_IMAGE=USD(./openshift-install version | awk '/release image/ {print USD3}') Obtain the CCO container image from the OpenShift Container Platform release image by running the following command: USD CCO_IMAGE=USD(oc adm release info --image-for='cloud-credential-operator' USDRELEASE_IMAGE -a ~/.pull-secret) Note Ensure that the architecture of the USDRELEASE_IMAGE matches the architecture of the environment in which you will use the ccoctl tool. Extract the ccoctl binary from the CCO container image within the OpenShift Container Platform release image by running the following command: USD oc image extract USDCCO_IMAGE --file="/usr/bin/ccoctl" -a ~/.pull-secret Change the permissions to make ccoctl executable by running the following command: USD chmod 775 ccoctl Verification To verify that ccoctl is ready to use, display the help file. Use a relative file name when you run the command, for example: USD ./ccoctl.rhel9 Example output OpenShift credentials provisioning tool Usage: ccoctl [command] Available Commands: alibabacloud Manage credentials objects for alibaba cloud aws Manage credentials objects for AWS cloud azure Manage credentials objects for Azure gcp Manage credentials objects for Google cloud help Help about any command ibmcloud Manage credentials objects for IBM Cloud nutanix Manage credentials objects for Nutanix Flags: -h, --help help for ccoctl Use "ccoctl [command] --help" for more information about a command. Additional resources Rotating API keys for IBM Cloud(R) 3.3. 
Next steps Installing a cluster on IBM Cloud(R) with customizations 3.4. Additional resources Preparing to update a cluster with manually maintained credentials
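Because IBM Cloud(R) supports only manual mode, the credentialsMode setting described in the section on alternatives to storing administrator-level secrets must be present in install-config.yaml before you create manifests. The following excerpt is a minimal sketch; the base domain is a placeholder and all other required installation parameters are omitted:
apiVersion: v1
baseDomain: example.com
credentialsMode: Manual
With this setting, the installation program does not create cloud credentials itself; you supply the credential secrets, for example as the manifests that the ccoctl utility generates.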
[ "RELEASE_IMAGE=USD(./openshift-install version | awk '/release image/ {print USD3}')", "CCO_IMAGE=USD(oc adm release info --image-for='cloud-credential-operator' USDRELEASE_IMAGE -a ~/.pull-secret)", "oc image extract USDCCO_IMAGE --file=\"/usr/bin/ccoctl\" -a ~/.pull-secret", "chmod 775 ccoctl", "./ccoctl.rhel9", "OpenShift credentials provisioning tool Usage: ccoctl [command] Available Commands: alibabacloud Manage credentials objects for alibaba cloud aws Manage credentials objects for AWS cloud azure Manage credentials objects for Azure gcp Manage credentials objects for Google cloud help Help about any command ibmcloud Manage credentials objects for IBM Cloud nutanix Manage credentials objects for Nutanix Flags: -h, --help help for ccoctl Use \"ccoctl [command] --help\" for more information about a command." ]
https://docs.redhat.com/en/documentation/openshift_container_platform/4.15/html/installing_on_ibm_cloud/configuring-iam-ibm-cloud
Providing feedback on Red Hat documentation
Providing feedback on Red Hat documentation We appreciate your input on our documentation. Do let us know how we can make it better. To give feedback, create a Bugzilla ticket: Go to the Bugzilla website. In the Component section, choose documentation . Fill in the Description field with your suggestion for improvement. Include a link to the relevant part(s) of documentation. Click Submit Bug . Red Hat OpenShift Data Foundation 4.12 supports deployment of Red Hat OpenShift on IBM Cloud clusters in connected environments.
null
https://docs.redhat.com/en/documentation/red_hat_openshift_data_foundation/4.13/html/deploying_openshift_data_foundation_using_ibm_cloud/providing-feedback-on-red-hat-documentation_rhodf
Chapter 2. Storage
Chapter 2. Storage 2.1. Storage Domains Overview A storage domain is a collection of images that have a common storage interface. A storage domain contains complete images of templates and virtual machines (including snapshots), ISO files, and metadata about themselves. A storage domain can be made of either block devices (SAN - iSCSI or FCP) or a file system (NAS - NFS, GlusterFS, or other POSIX compliant file systems). Note GlusterFS Storage is deprecated, and will no longer be supported in future releases. On NAS, all virtual disks, templates, and snapshots are files. On SAN (iSCSI/FCP), each virtual disk, template or snapshot is a logical volume. Block devices are aggregated into a logical entity called a volume group, and then divided by LVM (Logical Volume Manager) into logical volumes for use as virtual hard disks. See Red Hat Enterprise Linux Configuring and managing logical volumes for more information on LVM. Virtual disks can have one of two formats, either QCOW2 or raw. The type of storage can be either sparse or preallocated. Snapshots are always sparse but can be taken for disks of either format. Virtual machines that share the same storage domain can be migrated between hosts that belong to the same cluster.
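For the block-based (iSCSI or FCP) case, the volume group and logical volume layout described above can be inspected with the standard LVM tools on a host that has the storage domain attached. This is only an illustration of the concept, not a Red Hat Virtualization administration step, and the volume group and logical volume names depend on the storage domain UUIDs in your environment:
vgs
lvs -o lv_name,vg_name,lv_size,lv_attr
Each virtual disk, template, or snapshot stored on the domain is backed by one of the listed logical volumes.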
null
https://docs.redhat.com/en/documentation/red_hat_virtualization/4.4/html/technical_reference/chap-storage
5.177. ltrace
5.177. ltrace 5.177.1. RHBA-2012:0926 - ltrace bug fix and enhancement update Updated ltrace packages that fix multiple bugs and add one enhancement are now available for Red Hat Enterprise Linux 6. The ltrace utility is a debugging program that runs a specified command until the command exits. While the command is executing, ltrace intercepts and records both the dynamic library calls called by the executed process and the signals received by the executed process. The ltrace utility can also intercept and print system calls executed by the process. Bug Fixes BZ# 742340 Prior to this update, a traced process that had more than one thread could be aborted if the threads ran into breakpoints which the ltrace utility did not handle. With this update, ltrace attaches to the newly created threads, and carefully handles the breakpoints so that tracing events are not missed. This update also improves the detach logic so that a running process to which ltrace has been attached is left in a consistent state before detaching. BZ# 811184 Prior to this update, the ltrace utility could, under certain circumstances, fail to trace returns from functions which were called with a tail call optimization. This update adds support for tracing returns from functions called with a tail call optimization. Enhancement BZ# 738254 Prior to this update, ltrace could not trace library functions loaded via libdl. This update changes the behavior of the "-x" option for placing static breakpoints so that dynamic libraries are also considered and breakpoints set in them. This works for dynamic libraries that are linked to the binary as well as those that are opened with the function "dlopen" at runtime. All users of ltrace are advised to upgrade to this updated package, which fixes these bugs and adds this enhancement.
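To illustrate the enhanced "-x" behavior delivered by BZ#738254, a command of the following shape places a breakpoint on a symbol even when the symbol lives in a library that the program only loads later with dlopen. The program name and symbol name here are hypothetical examples, not part of the advisory:
ltrace -x plugin_init ./myapp
With the updated package, calls to plugin_init are reported even though the library that provides it is loaded at runtime via libdl.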
null
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/6.3_technical_notes/ltrace
Chapter 10. Troubleshooting
Chapter 10. Troubleshooting This section provides information for troubleshooting common migration issues. 10.1. Error messages This section describes error messages and how to resolve them. warm import retry limit reached The warm import retry limit reached error message is displayed during a warm migration if a VMware virtual machine (VM) has reached the maximum number (28) of changed block tracking (CBT) snapshots during the precopy stage. To resolve this problem, delete some of the CBT snapshots from the VM and restart the migration plan. Unable to resize disk image to required size The Unable to resize disk image to required size error message is displayed when migration fails because a virtual machine on the target provider uses persistent volumes with an EXT4 file system on block storage. The problem occurs because the default overhead that is assumed by CDI does not completely include the reserved place for the root partition. To resolve this problem, increase the file system overhead in CDI to more than 10%. 10.2. Using the must-gather tool You can collect logs and information about MTV custom resources (CRs) by using the must-gather tool. You must attach a must-gather data file to all customer cases. You can gather data for a specific namespace, migration plan, or virtual machine (VM) by using the filtering options. Note If you specify a non-existent resource in the filtered must-gather command, no archive file is created. Prerequisites You must be logged in to the OpenShift Virtualization cluster as a user with the cluster-admin role. You must have the Red Hat OpenShift CLI ( oc ) installed. Collecting logs and CR information Navigate to the directory where you want to store the must-gather data. Run the oc adm must-gather command: USD oc adm must-gather --image=registry.redhat.io/migration-toolkit-virtualization/mtv-must-gather-rhel8:2.6.7 The data is saved as /must-gather/must-gather.tar.gz . You can upload this file to a support case on the Red Hat Customer Portal . Optional: Run the oc adm must-gather command with the following options to gather filtered data: Namespace: USD oc adm must-gather --image=registry.redhat.io/migration-toolkit-virtualization/mtv-must-gather-rhel8:2.6.7 \ -- NS=<namespace> /usr/bin/targeted Migration plan: USD oc adm must-gather --image=registry.redhat.io/migration-toolkit-virtualization/mtv-must-gather-rhel8:2.6.7 \ -- PLAN=<migration_plan> /usr/bin/targeted Virtual machine: USD oc adm must-gather --image=registry.redhat.io/migration-toolkit-virtualization/mtv-must-gather-rhel8:2.6.7 \ -- VM=<vm_id> NS=<namespace> /usr/bin/targeted 1 1 Specify the VM ID as it appears in the Plan CR. 10.3. Architecture This section describes MTV custom resources, services, and workflows. 10.3.1. MTV custom resources and services The Migration Toolkit for Virtualization (MTV) is provided as an Red Hat OpenShift Operator. It creates and manages the following custom resources (CRs) and services. MTV custom resources Provider CR stores attributes that enable MTV to connect to and interact with the source and target providers. NetworkMapping CR maps the networks of the source and target providers. StorageMapping CR maps the storage of the source and target providers. Plan CR contains a list of VMs with the same migration parameters and associated network and storage mappings. Migration CR runs a migration plan. Only one Migration CR per migration plan can run at a given time. You can create multiple Migration CRs for a single Plan CR. 
MTV services The Inventory service performs the following actions: Connects to the source and target providers. Maintains a local inventory for mappings and plans. Stores VM configurations. Runs the Validation service if a VM configuration change is detected. The Validation service checks the suitability of a VM for migration by applying rules. The Migration Controller service orchestrates migrations. When you create a migration plan, the Migration Controller service validates the plan and adds a status label. If the plan fails validation, the plan status is Not ready and the plan cannot be used to perform a migration. If the plan passes validation, the plan status is Ready and it can be used to perform a migration. After a successful migration, the Migration Controller service changes the plan status to Completed . The Populator Controller service orchestrates disk transfers using Volume Populators. The Kubevirt Controller and Containerized Data Import (CDI) Controller services handle most technical operations. 10.3.2. High-level migration workflow The high-level workflow shows the migration process from the point of view of the user: You create a source provider, a target provider, a network mapping, and a storage mapping. You create a Plan custom resource (CR) that includes the following resources: Source provider Target provider, if MTV is not installed on the target cluster Network mapping Storage mapping One or more virtual machines (VMs) You run a migration plan by creating a Migration CR that references the Plan CR. If you cannot migrate all the VMs for any reason, you can create multiple Migration CRs for the same Plan CR until all VMs are migrated. For each VM in the Plan CR, the Migration Controller service records the VM migration progress in the Migration CR. Once the data transfer for each VM in the Plan CR completes, the Migration Controller service creates a VirtualMachine CR. When all VMs have been migrated, the Migration Controller service updates the status of the Plan CR to Completed . The power state of each source VM is maintained after migration. 10.3.3. Detailed migration workflow You can use the detailed migration workflow to troubleshoot a failed migration. The workflow describes the following steps: Warm Migration or migration to a remote OpenShift cluster: When you create the Migration custom resource (CR) to run a migration plan, the Migration Controller service creates a DataVolume CR for each source VM disk. For each VM disk: The Containerized Data Importer (CDI) Controller service creates a persistent volume claim (PVC) based on the parameters specified in the DataVolume CR. If the StorageClass has a dynamic provisioner, the persistent volume (PV) is dynamically provisioned by the StorageClass provisioner. The CDI Controller service creates an importer pod. The importer pod streams the VM disk to the PV. After the VM disks are transferred: The Migration Controller service creates a conversion pod with the PVCs attached to it when importing from VMWare. The conversion pod runs virt-v2v , which installs and configures device drivers on the PVCs of the target VM. The Migration Controller service creates a VirtualMachine CR for each source virtual machine (VM), connected to the PVCs. If the VM ran on the source environment, the Migration Controller powers on the VM, the KubeVirt Controller service creates a virt-launcher pod and a VirtualMachineInstance CR. The virt-launcher pod runs QEMU-KVM with the PVCs attached as VM disks. 
Cold migration from RHV or OpenStack to the local OpenShift cluster: When you create a Migration custom resource (CR) to run a migration plan, the Migration Controller service creates for each source VM disk a PersistentVolumeClaim CR, and an OvirtVolumePopulator when the source is RHV, or an OpenstackVolumePopulator CR when the source is OpenStack. For each VM disk: The Populator Controller service creates a temporarily persistent volume claim (PVC). If the StorageClass has a dynamic provisioner, the persistent volume (PV) is dynamically provisioned by the StorageClass provisioner. The Migration Controller service creates a dummy pod to bind all PVCs . The name of the pod contains pvcinit . The Populator Controller service creates a populator pod. The populator pod transfers the disk data to the PV. After the VM disks are transferred: The temporary PVC is deleted, and the initial PVC points to the PV with the data. The Migration Controller service creates a VirtualMachine CR for each source virtual machine (VM), connected to the PVCs. If the VM ran on the source environment, the Migration Controller powers on the VM, the KubeVirt Controller service creates a virt-launcher pod and a VirtualMachineInstance CR. The virt-launcher pod runs QEMU-KVM with the PVCs attached as VM disks. Cold migration from VMWare to the local OpenShift cluster: When you create a Migration custom resource (CR) to run a migration plan, the Migration Controller service creates a DataVolume CR for each source VM disk. For each VM disk: The Containerized Data Importer (CDI) Controller service creates a blank persistent volume claim (PVC) based on the parameters specified in the DataVolume CR. If the StorageClass has a dynamic provisioner, the persistent volume (PV) is dynamically provisioned by the StorageClass provisioner. For all VM disks: The Migration Controller service creates a dummy pod to bind all PVCs . The name of the pod contains pvcinit . The Migration Controller service creates a conversion pod for all PVCs. The conversion pod runs virt-v2v , which converts the VM to the KVM hypervisor and transfers the disks' data to their corresponding PVs. After the VM disks are transferred: The Migration Controller service creates a VirtualMachine CR for each source virtual machine (VM), connected to the PVCs. If the VM ran on the source environment, the Migration Controller powers on the VM, the KubeVirt Controller service creates a virt-launcher pod and a VirtualMachineInstance CR. The virt-launcher pod runs QEMU-KVM with the PVCs attached as VM disks. 10.4. Logs and custom resources You can download logs and custom resource (CR) information for troubleshooting. For more information, see the detailed migration workflow . 10.4.1. Collected logs and custom resource information You can download logs and custom resource (CR) yaml files for the following targets by using the Red Hat OpenShift web console or the command line interface (CLI): Migration plan: Web console or CLI. Virtual machine: Web console or CLI. Namespace: CLI only. The must-gather tool collects the following logs and CR files in an archive file: CRs: DataVolume CR: Represents a disk mounted on a migrated VM. VirtualMachine CR: Represents a migrated VM. Plan CR: Defines the VMs and storage and network mapping. Job CR: Optional: Represents a pre-migration hook, a post-migration hook, or both. Logs: importer pod: Disk-to-data-volume conversion log. 
The importer pod naming convention is importer-<migration_plan>-<vm_id><5_char_id> , for example, importer-mig-plan-ed90dfc6-9a17-4a8btnfh , where ed90dfc6-9a17-4a8 is a truncated RHV VM ID and btnfh is the generated 5-character ID. conversion pod: VM conversion log. The conversion pod runs virt-v2v , which installs and configures device drivers on the PVCs of the VM. The conversion pod naming convention is <migration_plan>-<vm_id><5_char_id> . virt-launcher pod: VM launcher log. When a migrated VM is powered on, the virt-launcher pod runs QEMU-KVM with the PVCs attached as VM disks. forklift-controller pod: The log is filtered for the migration plan, virtual machine, or namespace specified by the must-gather command. forklift-must-gather-api pod: The log is filtered for the migration plan, virtual machine, or namespace specified by the must-gather command. hook-job pod: The log is filtered for hook jobs. The hook-job naming convention is <migration_plan>-<vm_id><5_char_id> , for example, plan2j-vm-3696-posthook-4mx85 or plan2j-vm-3696-prehook-mwqnl . Note Empty or excluded log files are not included in the must-gather archive file. Example must-gather archive structure for a VMware migration plan 10.4.2. Downloading logs and custom resource information from the web console You can download logs and information about custom resources (CRs) for a completed, failed, or canceled migration plan or for migrated virtual machines (VMs) by using the Red Hat OpenShift web console. Procedure In the Red Hat OpenShift web console, click Migration Plans for virtualization . Click Get logs beside a migration plan name. In the Get logs window, click Get logs . The logs are collected. A Log collection complete message is displayed. Click Download logs to download the archive file. To download logs for a migrated VM, click a migration plan name and then click Get logs beside the VM. 10.4.3. Accessing logs and custom resource information from the command line interface You can access logs and information about custom resources (CRs) from the command line interface by using the must-gather tool. You must attach a must-gather data file to all customer cases. You can gather data for a specific namespace, a completed, failed, or canceled migration plan, or a migrated virtual machine (VM) by using the filtering options. Note If you specify a non-existent resource in the filtered must-gather command, no archive file is created. Prerequisites You must be logged in to the OpenShift Virtualization cluster as a user with the cluster-admin role. You must have the Red Hat OpenShift CLI ( oc ) installed. Procedure Navigate to the directory where you want to store the must-gather data. Run the oc adm must-gather command: USD oc adm must-gather --image=registry.redhat.io/migration-toolkit-virtualization/mtv-must-gather-rhel8:2.6.7 The data is saved as /must-gather/must-gather.tar.gz . You can upload this file to a support case on the Red Hat Customer Portal . 
Optional: Run the oc adm must-gather command with the following options to gather filtered data: Namespace: USD oc adm must-gather --image=registry.redhat.io/migration-toolkit-virtualization/mtv-must-gather-rhel8:2.6.7 \ -- NS=<namespace> /usr/bin/targeted Migration plan: USD oc adm must-gather --image=registry.redhat.io/migration-toolkit-virtualization/mtv-must-gather-rhel8:2.6.7 \ -- PLAN=<migration_plan> /usr/bin/targeted Virtual machine: USD oc adm must-gather --image=registry.redhat.io/migration-toolkit-virtualization/mtv-must-gather-rhel8:2.6.7 \ -- VM=<vm_name> NS=<namespace> /usr/bin/targeted 1 1 You must specify the VM name , not the VM ID, as it appears in the Plan CR.
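After the command completes, you can unpack the archive locally to inspect individual logs before attaching the file to a support case. The directory layout follows the example archive structure shown earlier in this chapter, so the exact paths depend on your namespace, plan, and pod names:
tar -xzf must-gather.tar.gz
less must-gather/namespaces/openshift-mtv/logs/forklift-controller-<pod_suffix>/current.log
Replace <pod_suffix> with the suffix of the forklift-controller pod directory as it appears in the extracted tree.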
[ "oc adm must-gather --image=registry.redhat.io/migration-toolkit-virtualization/mtv-must-gather-rhel8:2.6.7", "oc adm must-gather --image=registry.redhat.io/migration-toolkit-virtualization/mtv-must-gather-rhel8:2.6.7 -- NS=<namespace> /usr/bin/targeted", "oc adm must-gather --image=registry.redhat.io/migration-toolkit-virtualization/mtv-must-gather-rhel8:2.6.7 -- PLAN=<migration_plan> /usr/bin/targeted", "oc adm must-gather --image=registry.redhat.io/migration-toolkit-virtualization/mtv-must-gather-rhel8:2.6.7 -- VM=<vm_id> NS=<namespace> /usr/bin/targeted 1", "must-gather └── namespaces ├── target-vm-ns │ ├── crs │ │ ├── datavolume │ │ │ ├── mig-plan-vm-7595-tkhdz.yaml │ │ │ ├── mig-plan-vm-7595-5qvqp.yaml │ │ │ └── mig-plan-vm-8325-xccfw.yaml │ │ └── virtualmachine │ │ ├── test-test-rhel8-2disks2nics.yaml │ │ └── test-x2019.yaml │ └── logs │ ├── importer-mig-plan-vm-7595-tkhdz │ │ └── current.log │ ├── importer-mig-plan-vm-7595-5qvqp │ │ └── current.log │ ├── importer-mig-plan-vm-8325-xccfw │ │ └── current.log │ ├── mig-plan-vm-7595-4glzd │ │ └── current.log │ └── mig-plan-vm-8325-4zw49 │ └── current.log └── openshift-mtv ├── crs │ └── plan │ └── mig-plan-cold.yaml └── logs ├── forklift-controller-67656d574-w74md │ └── current.log └── forklift-must-gather-api-89fc7f4b6-hlwb6 └── current.log", "oc adm must-gather --image=registry.redhat.io/migration-toolkit-virtualization/mtv-must-gather-rhel8:2.6.7", "oc adm must-gather --image=registry.redhat.io/migration-toolkit-virtualization/mtv-must-gather-rhel8:2.6.7 -- NS=<namespace> /usr/bin/targeted", "oc adm must-gather --image=registry.redhat.io/migration-toolkit-virtualization/mtv-must-gather-rhel8:2.6.7 -- PLAN=<migration_plan> /usr/bin/targeted", "oc adm must-gather --image=registry.redhat.io/migration-toolkit-virtualization/mtv-must-gather-rhel8:2.6.7 -- VM=<vm_name> NS=<namespace> /usr/bin/targeted 1" ]
https://docs.redhat.com/en/documentation/migration_toolkit_for_virtualization/2.6/html/installing_and_using_the_migration_toolkit_for_virtualization/troubleshooting_mtv
Chapter 7. Memory Tapset
Chapter 7. Memory Tapset This family of probe points is used to probe memory-related events or query the memory usage of the current process. It contains the following probe points:
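As a quick way to see probes from this family in action, you can run a one-line script with the stap command. This sketch assumes that the vm.pagefault probe point and its address variable are available in your kernel's memory tapset; if they are not, stap reports the missing probe instead of running:
stap -e 'probe vm.pagefault { printf("%s(%d) faulted at 0x%x\n", execname(), pid(), address) }'
Press Ctrl+C to stop tracing; each printed line corresponds to a page fault taken by a process on the system.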
null
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/7/html/systemtap_tapset_reference/memory_stp
Chapter 5. ConsoleNotification [console.openshift.io/v1]
Chapter 5. ConsoleNotification [console.openshift.io/v1] Description ConsoleNotification is the extension for configuring openshift web console notifications. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). Type object Required spec 5.1. Specification Property Type Description apiVersion string APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources kind string Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds metadata ObjectMeta Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata spec object ConsoleNotificationSpec is the desired console notification configuration. 5.1.1. .spec Description ConsoleNotificationSpec is the desired console notification configuration. Type object Required text Property Type Description backgroundColor string backgroundColor is the color of the background for the notification as CSS data type color. color string color is the color of the text for the notification as CSS data type color. link object link is an object that holds notification link details. location string location is the location of the notification in the console. Valid values are: "BannerTop", "BannerBottom", "BannerTopBottom". text string text is the visible text of the notification. 5.1.2. .spec.link Description link is an object that holds notification link details. Type object Required href text Property Type Description href string href is the absolute secure URL for the link (must use https) text string text is the display text for the link 5.2. API endpoints The following API endpoints are available: /apis/console.openshift.io/v1/consolenotifications DELETE : delete collection of ConsoleNotification GET : list objects of kind ConsoleNotification POST : create a ConsoleNotification /apis/console.openshift.io/v1/consolenotifications/{name} DELETE : delete a ConsoleNotification GET : read the specified ConsoleNotification PATCH : partially update the specified ConsoleNotification PUT : replace the specified ConsoleNotification /apis/console.openshift.io/v1/consolenotifications/{name}/status GET : read status of the specified ConsoleNotification PATCH : partially update status of the specified ConsoleNotification PUT : replace status of the specified ConsoleNotification 5.2.1. /apis/console.openshift.io/v1/consolenotifications Table 5.1. Global query parameters Parameter Type Description pretty string If 'true', then the output is pretty printed. HTTP method DELETE Description delete collection of ConsoleNotification Table 5.2. Query parameters Parameter Type Description allowWatchBookmarks boolean allowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. 
continue string The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the key, but from the latest snapshot, which is inconsistent from the list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the " key". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. fieldSelector string A selector to restrict the list of returned objects by their fields. Defaults to everything. labelSelector string A selector to restrict the list of returned objects by their labels. Defaults to everything. limit integer limit is a maximum number of responses to return for a list call. If more items exist, the server will set the continue field on the list metadata to a value that can be used with the same initial query to retrieve the set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. resourceVersion string resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset resourceVersionMatch string resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset timeoutSeconds integer Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. 
watch boolean Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. Table 5.3. HTTP responses HTTP code Reponse body 200 - OK Status schema 401 - Unauthorized Empty HTTP method GET Description list objects of kind ConsoleNotification Table 5.4. Query parameters Parameter Type Description allowWatchBookmarks boolean allowWatchBookmarks requests watch events with type "BOOKMARK". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. continue string The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the key, but from the latest snapshot, which is inconsistent from the list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the " key". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. fieldSelector string A selector to restrict the list of returned objects by their fields. Defaults to everything. labelSelector string A selector to restrict the list of returned objects by their labels. Defaults to everything. limit integer limit is a maximum number of responses to return for a list call. If more items exist, the server will set the continue field on the list metadata to a value that can be used with the same initial query to retrieve the set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. 
If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. resourceVersion string resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset resourceVersionMatch string resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset timeoutSeconds integer Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. watch boolean Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. Table 5.5. HTTP responses HTTP code Reponse body 200 - OK ConsoleNotificationList schema 401 - Unauthorized Empty HTTP method POST Description create a ConsoleNotification Table 5.6. Query parameters Parameter Type Description dryRun string When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed fieldManager string fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint . fieldValidation string fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields, provided that the ServerSideFieldValidation feature gate is also enabled. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23 and is the default behavior when the ServerSideFieldValidation feature gate is disabled. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default when the ServerSideFieldValidation feature gate is enabled. - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. Table 5.7. Body parameters Parameter Type Description body ConsoleNotification schema Table 5.8. HTTP responses HTTP code Reponse body 200 - OK ConsoleNotification schema 201 - Created ConsoleNotification schema 202 - Accepted ConsoleNotification schema 401 - Unauthorized Empty 5.2.2. /apis/console.openshift.io/v1/consolenotifications/{name} Table 5.9. Global path parameters Parameter Type Description name string name of the ConsoleNotification Table 5.10. Global query parameters Parameter Type Description pretty string If 'true', then the output is pretty printed. 
HTTP method DELETE Description delete a ConsoleNotification Table 5.11. Query parameters Parameter Type Description dryRun string When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed gracePeriodSeconds integer The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. orphanDependents boolean Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. propagationPolicy string Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. Table 5.12. Body parameters Parameter Type Description body DeleteOptions schema Table 5.13. HTTP responses HTTP code Reponse body 200 - OK Status schema 202 - Accepted Status schema 401 - Unauthorized Empty HTTP method GET Description read the specified ConsoleNotification Table 5.14. Query parameters Parameter Type Description resourceVersion string resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset Table 5.15. HTTP responses HTTP code Reponse body 200 - OK ConsoleNotification schema 401 - Unauthorized Empty HTTP method PATCH Description partially update the specified ConsoleNotification Table 5.16. Query parameters Parameter Type Description dryRun string When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed fieldManager string fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint . fieldValidation string fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields, provided that the ServerSideFieldValidation feature gate is also enabled. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23 and is the default behavior when the ServerSideFieldValidation feature gate is disabled. 
- Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default when the ServerSideFieldValidation feature gate is enabled. - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. Table 5.17. Body parameters Parameter Type Description body Patch schema Table 5.18. HTTP responses HTTP code Reponse body 200 - OK ConsoleNotification schema 401 - Unauthorized Empty HTTP method PUT Description replace the specified ConsoleNotification Table 5.19. Query parameters Parameter Type Description dryRun string When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed fieldManager string fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint . fieldValidation string fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields, provided that the ServerSideFieldValidation feature gate is also enabled. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23 and is the default behavior when the ServerSideFieldValidation feature gate is disabled. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default when the ServerSideFieldValidation feature gate is enabled. - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. Table 5.20. Body parameters Parameter Type Description body ConsoleNotification schema Table 5.21. HTTP responses HTTP code Reponse body 200 - OK ConsoleNotification schema 201 - Created ConsoleNotification schema 401 - Unauthorized Empty 5.2.3. /apis/console.openshift.io/v1/consolenotifications/{name}/status Table 5.22. Global path parameters Parameter Type Description name string name of the ConsoleNotification Table 5.23. Global query parameters Parameter Type Description pretty string If 'true', then the output is pretty printed. HTTP method GET Description read status of the specified ConsoleNotification Table 5.24. Query parameters Parameter Type Description resourceVersion string resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset Table 5.25. 
HTTP responses HTTP code Reponse body 200 - OK ConsoleNotification schema 401 - Unauthorized Empty HTTP method PATCH Description partially update status of the specified ConsoleNotification Table 5.26. Query parameters Parameter Type Description dryRun string When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed fieldManager string fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint . fieldValidation string fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields, provided that the ServerSideFieldValidation feature gate is also enabled. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23 and is the default behavior when the ServerSideFieldValidation feature gate is disabled. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default when the ServerSideFieldValidation feature gate is enabled. - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. Table 5.27. Body parameters Parameter Type Description body Patch schema Table 5.28. HTTP responses HTTP code Reponse body 200 - OK ConsoleNotification schema 401 - Unauthorized Empty HTTP method PUT Description replace status of the specified ConsoleNotification Table 5.29. Query parameters Parameter Type Description dryRun string When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed fieldManager string fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint . fieldValidation string fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields, provided that the ServerSideFieldValidation feature gate is also enabled. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23 and is the default behavior when the ServerSideFieldValidation feature gate is disabled. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. 
The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default when the ServerSideFieldValidation feature gate is enabled. - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. Table 5.30. Body parameters Parameter Type Description body ConsoleNotification schema Table 5.31. HTTP responses HTTP code Response body 200 - OK ConsoleNotification schema 201 - Created ConsoleNotification schema 401 - Unauthorized Empty
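The operations above can be exercised with the standard OpenShift CLI. As a minimal, hedged sketch (the name, banner text, and colors are placeholder values; the spec field names follow the ConsoleNotification schema referenced in the tables above), a manifest for the create (POST) operation might look like this:

apiVersion: console.openshift.io/v1
kind: ConsoleNotification
metadata:
  name: example-banner
spec:
  text: "Scheduled maintenance on Saturday"
  location: BannerTop
  color: "#fff"
  backgroundColor: "#0088ce"

Applying it with oc apply -f example-banner.yaml corresponds to the create operation, and oc get consolenotifications corresponds to listing objects of kind ConsoleNotification.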
null
https://docs.redhat.com/en/documentation/openshift_container_platform/4.12/html/console_apis/consolenotification-console-openshift-io-v1
Partner Integration
Partner Integration Red Hat OpenStack Platform 16.0 Integrating certified third party software and hardware in a Red Hat OpenStack Platform environment OpenStack Documentation Team [email protected]
null
https://docs.redhat.com/en/documentation/red_hat_openstack_platform/16.0/html/partner_integration/index
probe::nfsd.proc.create
probe::nfsd.proc.create Name probe::nfsd.proc.create - NFS server creating a file for client Synopsis nfsd.proc.create Values proto transfer protocol filename file name client_ip the ip address of client uid requester's user id version nfs version gid requester's group id fh file handle (the first part is the length of the file handle) filelen length of file name
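An illustrative one-liner, not part of the tapset reference itself, that prints each create request handled by the NFS server; it assumes the nfsd tapset and kernel debuginfo are available, and the format specifiers may need adjusting to the value types reported on your system:

stap -e 'probe nfsd.proc.create { printf("NFSv%d create: %s (uid %d, gid %d)\n", version, filename, uid, gid) }'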
null
https://docs.redhat.com/en/documentation/Red_Hat_Enterprise_Linux/7/html/systemtap_tapset_reference/api-nfsd-proc-create
2.2.2.6. Summary of Differences
2.2.2.6. Summary of Differences This section lists the differences in commands and options in Red Hat Enterprise Linux 6: Commands removed: key langsupport mouse Commands deprecated: monitor xconfig --resolution Commands added: fcoe group rescue sshpw updates
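For illustration only, two of the added commands might appear in a kickstart file as follows; the user name, group name, and password are placeholders, and the exact option syntax should be checked against the Red Hat Enterprise Linux 6 kickstart documentation:

sshpw --username=installuser changeme --plaintext
group --name=admins --gid=5001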
null
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/migration_planning_guide/sect-migration_guide-installation-graphical_installer-kickstart-summary_of_differences
5.335. trace-cmd
5.335. trace-cmd 5.335.1. RHEA-2012:0976 - trace-cmd enhancement update Updated trace-cmd packages that add one enhancement are now available for Red Hat Enterprise Linux 6. The trace-cmd packages contain a command line tool that interfaces with ftrace in the kernel. Enhancement BZ# 632061 This update adds support for the "-i" option that can be used to ignore events. By default, if an event is listed but cannot be found by the trace-cmd utility on the system, the utility exits. This option allows trace-cmd execution to continue even when an event is listed on the command line but cannot be found on the system. All users of trace-cmd are advised to upgrade to these updated packages, which add this enhancement.
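A hedged example of the new option (the event names are placeholders; consult the trace-cmd(1) man page shipped with these packages for the exact syntax):

trace-cmd record -i -e sched:sched_switch -e fake_subsys:fake_event sleep 5

Without -i, the missing fake_subsys:fake_event event would cause trace-cmd to exit; with -i, it is ignored and tracing proceeds with the events that do exist.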
null
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/6.3_technical_notes/trace-cmd
Chapter 3. Setting Up Load Balancer Add-On
Chapter 3. Setting Up Load Balancer Add-On Load Balancer Add-On consists of two basic groups: the LVS routers and the real servers. To prevent a single point of failure, each group should contain at least two member systems. The LVS router group should consist of two identical or very similar systems running Red Hat Enterprise Linux. One will act as the active LVS router while the other stays in hot standby mode, so they need to have as close to the same capabilities as possible. Before choosing and configuring the hardware for the real server group, determine which of the three Load Balancer Add-On topologies to use. 3.1. The NAT Load Balancer Add-On Network The NAT topology allows for great latitude in utilizing existing hardware, but it is limited in its ability to handle large loads because all packets going into and coming out of the pool pass through the Load Balancer Add-On router. Network Layout The topology for Load Balancer Add-On using NAT routing is the easiest to configure from a network layout perspective because only one access point to the public network is needed. The real servers pass all requests back through the LVS router, so they are on their own private network. Hardware The NAT topology is the most flexible with regard to hardware because the real servers do not need to be Linux machines to function correctly. In a NAT topology, each real server only needs one NIC since it will only be responding to the LVS router. The LVS routers, on the other hand, need two NICs each to route traffic between the two networks. Because this topology creates a network bottleneck at the LVS router, gigabit Ethernet NICs can be employed on each LVS router to increase the bandwidth the LVS routers can handle. If gigabit Ethernet is employed on the LVS routers, any switch connecting the real servers to the LVS routers must have at least two gigabit Ethernet ports to handle the load efficiently. Software Because the NAT topology requires the use of iptables for some configurations, there can be a fair amount of software configuration outside of Piranha Configuration Tool . In particular, FTP services and the use of firewall marks require extra manual configuration of the LVS routers to route requests properly. 3.1.1. Configuring Network Interfaces for Load Balancer Add-On with NAT To set up Load Balancer Add-On with NAT, you must first configure the network interfaces for the public network and the private network on the LVS routers. In this example, the LVS routers' public interfaces ( eth0 ) will be on the 192.168.26/24 network (this is not a routable IP range, but assume there is a firewall in front of the LVS router) and the private interfaces which link to the real servers ( eth1 ) will be on the 10.11.12/24 network. Important Note that editing of the following files pertains to the network service. The Load Balancer Add-On is not compatible with the NetworkManager service. So on the active or primary LVS router node, the public interface's network script, /etc/sysconfig/network-scripts/ifcfg-eth0 , could look something like this: The /etc/sysconfig/network-scripts/ifcfg-eth1 for the private NAT interface on the LVS router could look something like this: In this example, the VIP for the LVS router's public interface will be 192.168.26.10 and the VIP for the NAT or private interface will be 10.11.12.10. So, it is essential that the real servers route requests back to the VIP for the NAT interface.
Important The sample Ethernet interface configuration settings in this section are for the real IP addresses of an LVS router and not the floating IP addresses. To configure the public and private floating IP addresses the administrator should use the Piranha Configuration Tool, as shown in Section 4.4, "GLOBAL SETTINGS" and Section 4.6.1, "The VIRTUAL SERVER Subsection" . After configuring the primary LVS router node's network interfaces, configure the backup LVS router's real network interfaces - taking care that none of the IP addresses conflict with any other IP addresses on the network. Important Be sure each interface on the backup node services the same network as the interface on the primary node. For instance, if eth0 connects to the public network on the primary node, it must also connect to the public network on the backup node. 3.1.2. Routing on the Real Servers The most important thing to remember when configuring the real servers' network interfaces in a NAT topology is to set the gateway for the NAT floating IP address of the LVS router. In this example, that address is 10.11.12.10. Note Once the network interfaces are up on the real servers, the machines will be unable to ping or otherwise connect to the public network. This is normal. You will, however, be able to ping the real IP for the LVS router's private interface, in this case 10.11.12.9. So the real server's /etc/sysconfig/network-scripts/ifcfg-eth0 file could look similar to this: Warning If a real server has more than one network interface configured with a GATEWAY= line, the first one to come up will get the gateway. Therefore, if both eth0 and eth1 are configured and eth1 is used for Load Balancer Add-On, the real servers may not route requests properly. It is best to turn off extraneous network interfaces by setting ONBOOT=no in their network scripts within the /etc/sysconfig/network-scripts/ directory or by making sure the gateway is correctly set in the interface which comes up first. 3.1.3. Enabling NAT Routing on the LVS Routers In a simple NAT Load Balancer Add-On configuration where each clustered service uses only one port, like HTTP on port 80, the administrator only needs to enable packet forwarding on the LVS routers for the requests to be properly routed between the outside world and the real servers. See Section 2.5, "Turning on Packet Forwarding" for instructions on turning on packet forwarding (a typical sysctl setting is shown after the interface files below). However, more configuration is necessary when the clustered services require more than one port to go to the same real server during a user session. For information on creating multi-port services using firewall marks, see Section 3.4, "Multi-port Services and Load Balancer Add-On" . Once forwarding is enabled on the LVS routers and the real servers are set up and have the clustered services running, use the Piranha Configuration Tool to configure Load Balancer Add-On as shown in Chapter 4, Configuring the Load Balancer Add-On with Piranha Configuration Tool . Warning Do not configure the floating IP for eth0:1 or eth1:1 by manually editing network scripts or using a network configuration tool. Instead, use the Piranha Configuration Tool as shown in Section 4.4, "GLOBAL SETTINGS" and Section 4.6.1, "The VIRTUAL SERVER Subsection" . When finished, start the pulse service as shown in Section 4.8, "Starting the Load Balancer Add-On" . Once pulse is up and running, the active LVS router will begin routing requests to the pool of real servers.
[ "DEVICE=eth0 BOOTPROTO=static ONBOOT=yes IPADDR=192.168.26.9 NETMASK=255.255.255.0 GATEWAY=192.168.26.254", "DEVICE=eth1 BOOTPROTO=static ONBOOT=yes IPADDR=10.11.12.9 NETMASK=255.255.255.0", "DEVICE=eth0 ONBOOT=yes BOOTPROTO=static IPADDR=10.11.12.1 NETMASK=255.255.255.0 GATEWAY=10.11.12.10" ]
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/load_balancer_administration/ch-lvs-setup-vsa
5.9.3.2.2. Viewing /proc/mounts
5.9.3.2.2. Viewing /proc/mounts The /proc/mounts file is part of the proc virtual file system. As with the other files under /proc/ , the mounts "file" does not exist on any disk drive in your Red Hat Enterprise Linux system. In fact, it is not even a file; instead, it is a representation of system status made available (by the Linux kernel) in file form. Using the command cat /proc/mounts , we can view the status of all mounted file systems: As we can see from the above example, the format of /proc/mounts is very similar to that of /etc/mtab . There are a number of file systems mounted that have nothing to do with disk drives. Among these are the /proc/ file system itself (along with two other file systems mounted under /proc/ ), pseudo-ttys, and shared memory. While the format is admittedly not very user-friendly, looking at /proc/mounts is the best way to be 100% sure of seeing what is mounted on your Red Hat Enterprise Linux system, as the kernel is providing this information. Other methods can, under rare circumstances, be inaccurate. However, most of the time you will likely use a command with more easily-read (and useful) output. The next section describes that command.
[ "rootfs / rootfs rw 0 0 /dev/root / ext3 rw 0 0 /proc /proc proc rw 0 0 usbdevfs /proc/bus/usb usbdevfs rw 0 0 /dev/sda1 /boot ext3 rw 0 0 none /dev/pts devpts rw 0 0 /dev/sda4 /home ext3 rw 0 0 none /dev/shm tmpfs rw 0 0 none /proc/sys/fs/binfmt_misc binfmt_misc rw 0 0" ]
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/4/html/introduction_to_system_administration/s4-storage-mounting-proc
Chapter 5. About the Migration Toolkit for Containers
Chapter 5. About the Migration Toolkit for Containers The Migration Toolkit for Containers (MTC) enables you to migrate stateful application workloads from OpenShift Container Platform 3 to 4.7 at the granularity of a namespace. Important Before you begin your migration, be sure to review the differences between OpenShift Container Platform 3 and 4 . The MTC console is installed on the target cluster by default. You can configure the Migration Toolkit for Containers Operator to install the console on an OpenShift Container Platform 3 source cluster or on a remote cluster . MTC supports the file system and snapshot data copy methods for migrating data from the source cluster to the target cluster. You can select a method that is suited for your environment and is supported by your storage provider. The service catalog is deprecated in OpenShift Container Platform 4. You can migrate workload resources provisioned with the service catalog from OpenShift Container Platform 3 to 4 but you cannot perform service catalog actions such as provision , deprovision , or update on these workloads after migration. The MTC console displays a message if the service catalog resources cannot be migrated. 5.1. Terminology Table 5.1. MTC terminology Term Definition Source cluster Cluster from which the applications are migrated. Destination cluster [1] Cluster to which the applications are migrated. Replication repository Object storage used for copying images, volumes, and Kubernetes objects during indirect migration or for Kubernetes objects during direct volume migration or direct image migration. The replication repository must be accessible to all clusters. Host cluster Cluster on which the migration-controller pod and the web console are running. The host cluster is usually the destination cluster but this is not required. The host cluster does not require an exposed registry route for direct image migration. Remote cluster A remote cluster is usually the source cluster but this is not required. A remote cluster requires a Secret custom resource that contains the migration-controller service account token. A remote cluster requires an exposed secure registry route for direct image migration. Indirect migration Images, volumes, and Kubernetes objects are copied from the source cluster to the replication repository and then from the replication repository to the destination cluster. Direct volume migration Persistent volumes are copied directly from the source cluster to the destination cluster. Direct image migration Images are copied directly from the source cluster to the destination cluster. Stage migration Data is copied to the destination cluster without stopping the application. Running a stage migration multiple times reduces the duration of the cutover migration. Cutover migration The application is stopped on the source cluster and its resources are migrated to the destination cluster. State migration Application state is migrated by copying specific persistent volume claims. Rollback migration Rollback migration rolls back a completed migration. 1 Called the target cluster in the MTC web console. 5.2. MTC workflow You can migrate Kubernetes resources, persistent volume data, and internal container images to OpenShift Container Platform 4.7 by using the Migration Toolkit for Containers (MTC) web console or the Kubernetes API. MTC migrates the following resources: A namespace specified in a migration plan. 
Namespace-scoped resources: When the MTC migrates a namespace, it migrates all the objects and resources associated with that namespace, such as services or pods. Additionally, if a resource that exists in the namespace but not at the cluster level depends on a resource that exists at the cluster level, the MTC migrates both resources. For example, a security context constraint (SCC) is a resource that exists at the cluster level and a service account (SA) is a resource that exists at the namespace level. If an SA exists in a namespace that the MTC migrates, the MTC automatically locates any SCCs that are linked to the SA and also migrates those SCCs. Similarly, the MTC migrates persistent volume claims that are linked to the persistent volumes of the namespace. Note Cluster-scoped resources might have to be migrated manually, depending on the resource. Custom resources (CRs) and custom resource definitions (CRDs): MTC automatically migrates CRs and CRDs at the namespace level. Migrating an application with the MTC web console involves the following steps: Install the Migration Toolkit for Containers Operator on all clusters. You can install the Migration Toolkit for Containers Operator in a restricted environment with limited or no internet access. The source and target clusters must have network access to each other and to a mirror registry. Configure the replication repository, an intermediate object storage that MTC uses to migrate data. The source and target clusters must have network access to the replication repository during migration. If you are using a proxy server, you must configure it to allow network traffic between the replication repository and the clusters. Add the source cluster to the MTC web console. Add the replication repository to the MTC web console. Create a migration plan, with one of the following data migration options: Copy : MTC copies the data from the source cluster to the replication repository, and from the replication repository to the target cluster. Note If you are using direct image migration or direct volume migration, the images or volumes are copied directly from the source cluster to the target cluster. Move : MTC unmounts a remote volume, for example, NFS, from the source cluster, creates a PV resource on the target cluster pointing to the remote volume, and then mounts the remote volume on the target cluster. Applications running on the target cluster use the same remote volume that the source cluster was using. The remote volume must be accessible to the source and target clusters. Note Although the replication repository does not appear in this diagram, it is required for migration. Run the migration plan, with one of the following options: Stage copies data to the target cluster without stopping the application. A stage migration can be run multiple times so that most of the data is copied to the target before migration. Running one or more stage migrations reduces the duration of the cutover migration. Cutover stops the application on the source cluster and moves the resources to the target cluster. Optional: You can clear the Halt transactions on the source cluster during migration checkbox. 5.3. About data copy methods The Migration Toolkit for Containers (MTC) supports the file system and snapshot data copy methods for migrating data from the source cluster to the target cluster. You can select a method that is suited for your environment and is supported by your storage provider. 5.3.1. 
File system copy method MTC copies data files from the source cluster to the replication repository, and from there to the target cluster. The file system copy method uses Restic for indirect migration or Rsync for direct volume migration. Table 5.2. File system copy method summary Benefits Limitations Clusters can have different storage classes. Supported for all S3 storage providers. Optional data verification with checksum. Supports direct volume migration, which significantly increases performance. Slower than the snapshot copy method. Optional data verification significantly reduces performance. 5.3.2. Snapshot copy method MTC copies a snapshot of the source cluster data to the replication repository of a cloud provider. The data is restored on the target cluster. The snapshot copy method can be used with Amazon Web Services, Google Cloud Provider, and Microsoft Azure. Table 5.3. Snapshot copy method summary Benefits Limitations Faster than the file system copy method. Cloud provider must support snapshots. Clusters must be on the same cloud provider. Clusters must be in the same location or region. Clusters must have the same storage class. Storage class must be compatible with snapshots. Does not support direct volume migration. 5.4. Direct volume migration and direct image migration You can use direct image migration (DIM) and direct volume migration (DVM) to migrate images and data directly from the source cluster to the target cluster. If you run DVM with nodes that are in different availability zones, the migration might fail because the migrated pods cannot access the persistent volume claim. DIM and DVM have significant performance benefits because the intermediate steps of backing up files from the source cluster to the replication repository and restoring files from the replication repository to the target cluster are skipped. The data is transferred with Rsync . DIM and DVM have additional prerequisites.
null
https://docs.redhat.com/en/documentation/openshift_container_platform/4.7/html/migrating_from_version_3_to_4/about-mtc-3-4
16.6.2. Running virt-rescue
16.6.2. Running virt-rescue Before you use virt-rescue on a guest virtual machine, make sure the guest virtual machine is not running, otherwise disk corruption will occur. When you are sure the guest virtual machine is not live, enter: (where GuestName is the guest name as known to libvirt), or: (where the path can be any file, any logical volume, LUN, or so on) containing a guest virtual machine disk. You will first see output scroll past, as virt-rescue boots the rescue VM. In the end you will see: The shell prompt here is an ordinary bash shell, and a reduced set of ordinary Red Hat Enterprise Linux commands is available. For example, you can enter: The command will list disk partitions. To mount a file system, it is suggested that you mount it under /sysroot , which is an empty directory in the rescue machine for the user to mount anything you like. Note that the files under / are files from the rescue VM itself: When you are finished rescuing the guest virtual machine, exit the shell by entering exit or Ctrl+d . virt-rescue has many command line options. The options most often used are: --ro : Operate in read-only mode on the guest virtual machine. No changes will be saved. You can use this to experiment with the guest virtual machine. As soon as you exit from the shell, all of your changes are discarded. --network : Enable network access from the rescue shell. Use this if you need to, for example, download RPM or other files into the guest virtual machine.
[ "virt-rescue GuestName", "virt-rescue /path/to/disk/image", "Welcome to virt-rescue, the libguestfs rescue shell. Note: The contents of / are the rescue appliance. You have to mount the guest virtual machine's partitions under /sysroot before you can examine them. bash: cannot set terminal process group (-1): Inappropriate ioctl for device bash: no job control in this shell ><rescue>", "><rescue> fdisk -l /dev/vda", "><rescue> mount /dev/vda1 /sysroot/ EXT4-fs (vda1): mounted filesystem with ordered data mode. Opts: (null) ><rescue> ls -l /sysroot/grub/ total 324 -rw-r--r--. 1 root root 63 Sep 16 18:14 device.map -rw-r--r--. 1 root root 13200 Sep 16 18:14 e2fs_stage1_5 -rw-r--r--. 1 root root 12512 Sep 16 18:14 fat_stage1_5 -rw-r--r--. 1 root root 11744 Sep 16 18:14 ffs_stage1_5 -rw-------. 1 root root 1503 Oct 15 11:19 grub.conf [...]" ]
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/virtualization_administration_guide/sect-run-virt-rescue
B.2. Sample lvm.conf File
B.2. Sample lvm.conf File The following is a sample lvm.conf configuration file, the default file for RHEL 4.8. Your configuration file may differ for different releases.
[ "cat lvm.conf This is an example configuration file for the LVM2 system. It contains the default settings that would be used if there was no /etc/lvm/lvm.conf file. # Refer to 'man lvm.conf' for further information including the file layout. # To put this file in a different directory and override /etc/lvm set the environment variable LVM_SYSTEM_DIR before running the tools. This section allows you to configure which block devices should be used by the LVM system. devices { # Where do you want your volume groups to appear ? dir = \"/dev\" # An array of directories that contain the device nodes you wish # to use with LVM2. scan = [ \"/dev\" ] # A filter that tells LVM2 to only use a restricted set of devices. # The filter consists of an array of regular expressions. These # expressions can be delimited by a character of your choice, and # prefixed with either an 'a' (for accept) or 'r' (for reject). # The first expression found to match a device name determines if # the device will be accepted or rejected (ignored). Devices that # don't match any patterns are accepted. # Be careful if there there are symbolic links or multiple filesystem # entries for the same device as each name is checked separately against # the list of patterns. The effect is that if any name matches any 'a' # pattern, the device is accepted; otherwise if any name matches any 'r' # pattern it is rejected; otherwise it is accepted. # Don't have more than one filter line active at once: only one gets used. # Run vgscan after you change this parameter to ensure that # the cache file gets regenerated (see below). # If it doesn't do what you expect, check the output of 'vgscan -vvvv'. # By default we accept every block device: #filter = [ \"a/.*/\" ] # Exclude the cdrom drive # filter = [ \"r|/dev/cdrom|\" ] # When testing I like to work with just loopback devices: # filter = [ \"a/loop/\", \"r/.*/\" ] # Or maybe all loops and ide drives except hdc: # filter =[ \"a|loop|\", \"r|/dev/hdc|\", \"a|/dev/ide|\", \"r|.*|\" ] # Use anchors if you want to be really specific # filter = [ \"a|^/dev/hda8USD|\", \"r/.*/\" ] # The results of the filtering are cached on disk to avoid # rescanning dud devices (which can take a very long time). By # default this cache file is hidden in the /etc/lvm directory. # in a file called '.cache'. # It is safe to delete this file: the tools regenerate it. # (The old setting 'cache' is still respected if neither of # these new ones is present.) cache_dir = \"/etc/lvm/cache\" cache_file_prefix = \"\" # You can turn off writing this cache file by setting this to 0. write_cache_state = 1 # Advanced settings. # List of pairs of additional acceptable block device types found # in /proc/devices with maximum (non-zero) number of partitions. # types = [ \"fd\", 16 ] # If sysfs is mounted (2.6 kernels) restrict device scanning to # the block devices it believes are valid. # 1 enables; 0 disables. sysfs_scan = 1 # By default, LVM2 will ignore devices used as components of # software RAID (md) devices by looking for md superblocks. # 1 enables; 0 disables. md_component_detection = 1 # By default, if a PV is placed directly upon an md device, LVM2 # will align its data blocks with the the chunk_size exposed in sysfs. # 1 enables; 0 disables. md_chunk_alignment = 1 # If, while scanning the system for PVs, LVM2 encounters a device-mapper # device that has its I/O suspended, it waits for it to become accessible. # Set this to 1 to skip such devices. This should only be needed # in recovery situations. 
ignore_suspended_devices = 0 } This section that allows you to configure the nature of the information that LVM2 reports. log { # Controls the messages sent to stdout or stderr. # There are three levels of verbosity, 3 being the most verbose. verbose = 0 # Should we send log messages through syslog? # 1 is yes; 0 is no. syslog = 1 # Should we log error and debug messages to a file? # By default there is no log file. #file = \"/var/log/lvm2.log\" # Should we overwrite the log file each time the program is run? # By default we append. overwrite = 0 # What level of log messages should we send to the log file and/or syslog? # There are 6 syslog-like log levels currently in use - 2 to 7 inclusive. # 7 is the most verbose (LOG_DEBUG). level = 0 # Format of output messages # Whether or not (1 or 0) to indent messages according to their severity indent = 1 # Whether or not (1 or 0) to display the command name on each line output command_names = 0 # A prefix to use before the message text (but after the command name, # if selected). Default is two spaces, so you can see/grep the severity # of each message. prefix = \" \" # To make the messages look similar to the original LVM tools use: # indent = 0 # command_names = 1 # prefix = \" -- \" # Set this if you want log messages during activation. # Don't use this in low memory situations (can deadlock). # activation = 0 } Configuration of metadata backups and archiving. In LVM2 when we talk about a 'backup' we mean making a copy of the metadata for the *current* system. The 'archive' contains old metadata configurations. Backups are stored in a human readable text format. backup { # Should we maintain a backup of the current metadata configuration ? # Use 1 for Yes; 0 for No. # Think very hard before turning this off! backup = 1 # Where shall we keep it ? # Remember to back up this directory regularly! backup_dir = \"/etc/lvm/backup\" # Should we maintain an archive of old metadata configurations. # Use 1 for Yes; 0 for No. # On by default. Think very hard before turning this off. archive = 1 # Where should archived files go ? # Remember to back up this directory regularly! archive_dir = \"/etc/lvm/archive\" # What is the minimum number of archive files you wish to keep ? retain_min = 10 # What is the minimum time you wish to keep an archive file for ? retain_days = 30 } Settings for the running LVM2 in shell (readline) mode. shell { # Number of lines of history to store in ~/.lvm_history history_size = 100 } Miscellaneous global LVM2 settings global { library_dir = \"/usr/lib64\" # The file creation mask for any files and directories created. # Interpreted as octal if the first digit is zero. umask = 077 # Allow other users to read the files #umask = 022 # Enabling test mode means that no changes to the on disk metadata # will be made. Equivalent to having the -t option on every # command. Defaults to off. test = 0 # Default value for --units argument units = \"h\" # Whether or not to communicate with the kernel device-mapper. # Set to 0 if you want to use the tools to manipulate LVM metadata # without activating any logical volumes. # If the device-mapper kernel driver is not present in your kernel # setting this to 0 should suppress the error messages. activation = 1 # If we can't communicate with device-mapper, should we try running # the LVM1 tools? # This option only applies to 2.4 kernels and is provided to help you # switch between device-mapper kernels and LVM1 kernels. # The LVM1 tools need to be installed with .lvm1 suffices # e.g. 
vgscan.lvm1 and they will stop working after you start using # the new lvm2 on-disk metadata format. # The default value is set when the tools are built. # fallback_to_lvm1 = 0 # The default metadata format that commands should use - \"lvm1\" or \"lvm2\". # The command line override is -M1 or -M2. # Defaults to \"lvm1\" if compiled in, else \"lvm2\". # format = \"lvm1\" # Location of proc filesystem proc = \"/proc\" # Type of locking to use. Defaults to local file-based locking (1). # Turn locking off by setting to 0 (dangerous: risks metadata corruption # if LVM2 commands get run concurrently). # Type 2 uses the external shared library locking_library. # Type 3 uses built-in clustered locking. locking_type = 1 # If using external locking (type 2) and initialisation fails, # with this set to 1 an attempt will be made to use the built-in # clustered locking. # If you are using a customised locking_library you should set this to 0. fallback_to_clustered_locking = 1 # If an attempt to initialise type 2 or type 3 locking failed, perhaps # because cluster components such as clvmd are not running, with this set # to 1 an attempt will be made to use local file-based locking (type 1). # If this succeeds, only commands against local volume groups will proceed. # Volume Groups marked as clustered will be ignored. fallback_to_local_locking = 1 # Local non-LV directory that holds file-based locks while commands are # in progress. A directory like /tmp that may get wiped on reboot is OK. locking_dir = \"/var/lock/lvm\" # Other entries can go here to allow you to load shared libraries # e.g. if support for LVM1 metadata was compiled as a shared library use # format_libraries = \"liblvm2format1.so\" # Full pathnames can be given. # Search this directory first for shared libraries. # library_dir = \"/lib\" # The external locking library to load if locking_type is set to 2. # locking_library = \"liblvm2clusterlock.so\" } activation { # How to fill in missing stripes if activating an incomplete volume. # Using \"error\" will make inaccessible parts of the device return # I/O errors on access. You can instead use a device path, in which # case, that device will be used to in place of missing stripes. # But note that using anything other than \"error\" with mirrored # or snapshotted volumes is likely to result in data corruption. missing_stripe_filler = \"error\" # How much stack (in KB) to reserve for use while devices suspended reserved_stack = 256 # How much memory (in KB) to reserve for use while devices suspended reserved_memory = 8192 # Nice value used while devices suspended process_priority = -18 # If volume_list is defined, each LV is only activated if there is a # match against the list. # \"vgname\" and \"vgname/lvname\" are matched exactly. # \"@tag\" matches any tag set in the LV or VG. # \"@*\" matches if any tag defined on the host is also set in the LV or VG # # volume_list = [ \"vg1\", \"vg2/lvol1\", \"@tag1\", \"@*\" ] # Size (in KB) of each copy operation when mirroring mirror_region_size = 512 # Setting to use when there is no readahead value stored in the metadata. # # \"none\" - Disable readahead. # \"auto\" - Use default value chosen by kernel. readahead = \"auto\" # 'mirror_image_fault_policy' and 'mirror_log_fault_policy' define # how a device failure affecting a mirror is handled. # A mirror is composed of mirror images (copies) and a log. # A disk log ensures that a mirror does not need to be re-synced # (all copies made the same) every time a machine reboots or crashes. 
# # In the event of a failure, the specified policy will be used to # determine what happens: # # \"remove\" - Simply remove the faulty device and run without it. If # the log device fails, the mirror would convert to using # an in-memory log. This means the mirror will not # remember its sync status across crashes/reboots and # the entire mirror will be re-synced. If a # mirror image fails, the mirror will convert to a # non-mirrored device if there is only one remaining good # copy. # # \"allocate\" - Remove the faulty device and try to allocate space on # a new device to be a replacement for the failed device. # Using this policy for the log is fast and maintains the # ability to remember sync state through crashes/reboots. # Using this policy for a mirror device is slow, as it # requires the mirror to resynchronize the devices, but it # will preserve the mirror characteristic of the device. # This policy acts like \"remove\" if no suitable device and # space can be allocated for the replacement. # Currently this is not implemented properly and behaves # similarly to: # # \"allocate_anywhere\" - Operates like \"allocate\", but it does not # require that the new space being allocated be on a # device is not part of the mirror. For a log device # failure, this could mean that the log is allocated on # the same device as a mirror device. For a mirror # device, this could mean that the mirror device is # allocated on the same device as another mirror device. # This policy would not be wise for mirror devices # because it would break the redundant nature of the # mirror. This policy acts like \"remove\" if no suitable # device and space can be allocated for the replacement. mirror_log_fault_policy = \"allocate\" mirror_device_fault_policy = \"remove\" } #################### Advanced section # #################### Metadata settings # metadata { # Default number of copies of metadata to hold on each PV. 0, 1 or 2. # You might want to override it from the command line with 0 # when running pvcreate on new PVs which are to be added to large VGs. # pvmetadatacopies = 1 # Approximate default size of on-disk metadata areas in sectors. # You should increase this if you have large volume groups or # you want to retain a large on-disk history of your metadata changes. # pvmetadatasize = 255 # List of directories holding live copies of text format metadata. # These directories must not be on logical volumes! # It's possible to use LVM2 with a couple of directories here, # preferably on different (non-LV) filesystems, and with no other # on-disk metadata (pvmetadatacopies = 0). Or this can be in # addition to on-disk metadata areas. # The feature was originally added to simplify testing and is not # supported under low memory situations - the machine could lock up. # # Never edit any files in these directories by hand unless you # you are absolutely sure you know what you are doing! Use # the supplied toolset to make changes (e.g. vgcfgrestore). # dirs = [ \"/etc/lvm/metadata\", \"/mnt/disk2/lvm/metadata2\" ] #} Event daemon # dmeventd { # mirror_library is the library used when monitoring a mirror device. # # \"libdevmapper-event-lvm2mirror.so\" attempts to recover from # failures. It removes failed devices from a volume group and # reconfigures a mirror as necessary. If no mirror library is # provided, mirrors are not monitored through dmeventd. # mirror_library = \"libdevmapper-event-lvm2mirror.so\" # snapshot_library is the library used when monitoring a snapshot device. 
# # \"libdevmapper-event-lvm2snapshot.so\" monitors the filling of # snapshots and emits a warning through syslog, when the use of # snapshot exceedes 80%. The warning is repeated when 85%, 90% and # 95% of the snapshot are filled. # snapshot_library = \"libdevmapper-event-lvm2snapshot.so\" #}" ]
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/4/html/cluster_logical_volume_manager/lvmconf_file
Chapter 1. Preparing to install on Red Hat Virtualization (RHV)
Chapter 1. Preparing to install on Red Hat Virtualization (RHV) 1.1. Prerequisites You reviewed details about the OpenShift Container Platform installation and update processes. You have a supported combination of versions in the Support Matrix for OpenShift Container Platform on Red Hat Virtualization (RHV) . You read the documentation on selecting a cluster installation method and preparing it for users . 1.2. Choosing a method to install OpenShift Container Platform on RHV You can install OpenShift Container Platform on installer-provisioned or user-provisioned infrastructure. The default installation type uses installer-provisioned infrastructure, where the installation program provisions the underlying infrastructure for the cluster. You can also install OpenShift Container Platform on infrastructure that you provision. If you do not use infrastructure that the installation program provisions, you must manage and maintain the cluster resources yourself. See Installation process for more information about installer-provisioned and user-provisioned installation processes. 1.2.1. Installing a cluster on installer-provisioned infrastructure You can install a cluster on Red Hat Virtualization (RHV) virtual machines that are provisioned by the OpenShift Container Platform installation program, by using one of the following methods: Installing a cluster quickly on RHV : You can quickly install OpenShift Container Platform on RHV virtual machines that the OpenShift Container Platform installation program provisions. Installing a cluster on RHV with customizations : You can install a customized OpenShift Container Platform cluster on installer-provisioned guests on RHV. The installation program allows for some customization to be applied at the installation stage. Many other customization options are available post-installation . 1.2.2. Installing a cluster on user-provisioned infrastructure You can install a cluster on RHV virtual machines that you provision, by using one of the following methods: Installing a cluster on RHV with user-provisioned infrastructure : You can install OpenShift Container Platform on RHV virtual machines that you provision. You can use the provided Ansible playbooks to assist with the installation. Installing a cluster on RHV in a restricted network : You can install OpenShift Container Platform on RHV in a restricted or disconnected network by creating an internal mirror of the installation release content. You can use this method to install a user-provisioned cluster that does not require an active internet connection to obtain the software components. You can also use this installation method to ensure that your clusters only use container images that satisfy your organizational controls on external content.
null
https://docs.redhat.com/en/documentation/openshift_container_platform/4.13/html/installing_on_rhv/preparing-to-install-on-rhv
Chapter 7. New Packages
Chapter 7. New Packages 7.1. RHEA-2013:1625 - new packages: freerdp New freerdp packages are now available for Red Hat Enterprise Linux 6. FreeRDP is a free implementation of the Remote Desktop Protocol (RDP), released under the Apache license. The xfreerdp client can connect to RDP servers such as Microsoft Windows machines, xrdp and VirtualBox. This enhancement update adds the freerdp packages to Red Hat Enterprise Linux 6. (BZ# 951696 ) All users who require freerdp are advised to install these new packages.
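As a hedged sketch only (the server address is a placeholder, and the client option syntax should be verified against the xfreerdp help output for the packaged version), installing the packages and connecting to an RDP server might look like:

yum install freerdp
xfreerdp -u admin -g 1024x768 rdp.example.com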
null
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/6.5_technical_notes/ch07
4.5. Configure Expiration
4.5. Configure Expiration In Red Hat JBoss Data Grid, expiration is configured in a manner similar to eviction, with differing parameters for Library Mode and Remote Client-Server Mode. Procedure 4.1. Configure Expiration in Library Mode Add the Expiration Tag Add the <expiration> tag to your project's <cache> tags as follows: Set the Expiration Lifespan Set the lifespan value to set the period of time (in milliseconds) an entry can remain in memory. The following is an example of this step: Set the Maximum Idle Time Set the time that entries are allowed to remain idle (unused) after which they are removed (in milliseconds). The default value is -1 for unlimited time. Set the Background Reaper Thread Enable or disable the background reaper thread to test entries for expiration. Regardless of whether a reaper is used, entries are tested for expiration lazily when they are touched. The default value is true . Set the Expiration Interval Set the interval (in milliseconds) between subsequent runs to purge expired entries from memory and any associated cache stores. To disable the periodic eviction process, set the interval to -1 . The default value is 1000 . Procedure 4.2. Configure Expiration in Remote Client-Server Mode Set the Expiration Lifespan Set the lifespan value to set the period of time (in milliseconds) an entry can remain in memory. The following is an example of this step: Set the Maximum Idle Time Set the time that entries are allowed to remain idle (unused) after which they are removed (in milliseconds). The default value is -1 for unlimited time. Set the Expiration Interval Set the interval (in milliseconds) between subsequent runs to purge expired entries from memory and any associated cache stores. To disable the periodic eviction process, set the interval to -1 . The default value is 5000 . Reaper Thread in Remote Client-Server Mode In Remote Client-Server Mode the background reaper thread is only enabled if interval is greater than 0 . As interval defaults to 5000 , the background reaper thread is automatically enabled if expiration is configured.
[ "<expiration />", "<expiration lifespan=\"1000\" />", "<expiration lifespan=\"1000\" maxIdle=\"1000\" />", "<expiration lifespan=\"1000\" maxIdle=\"1000\" reaperEnabled=\"true\" />", "<expiration lifespan=\"1000\" maxIdle=\"1000\" reaperEnabled=\"true\" wakeUpInterval=\"5000\" />", "<expiration lifespan=\"1000\" />", "<expiration lifespan=\"1000\" max-idle=\"1000\" />", "<expiration lifespan=\"1000\" max-idle=\"1000\" interval=\"10000\" />" ]
https://docs.redhat.com/en/documentation/red_hat_data_grid/6.6/html/administration_and_configuration_guide/expiration_configuration
Chapter 4. Configuration
Chapter 4. Configuration This chapter describes the process for binding the AMQ JMS implementation to your JMS application and setting configuration options. JMS uses the Java Naming and Directory Interface (JNDI) to register and look up API implementations and other resources. This enables you to write code to the JMS API without tying it to a particular implementation. Configuration options are exposed as query parameters on the connection URI. 4.1. Configuring the JNDI initial context JMS applications use a JNDI InitialContext object obtained from an InitialContextFactory to look up JMS objects such as the connection factory. AMQ JMS provides an implementation of the InitialContextFactory in the org.apache.qpid.jms.jndi.JmsInitialContextFactory class. The InitialContextFactory implementation is discovered when the InitialContext object is instantiated: javax.naming.Context context = new javax.naming.InitialContext(); To find an implementation, JNDI must be configured in your environment. There are three ways of achieving this: using a jndi.properties file, using a system property, or using the initial context API. Using a jndi.properties file Create a file named jndi.properties and place it on the Java classpath. Add a property with the key java.naming.factory.initial. Example: Setting the JNDI initial context factory using a jndi.properties file java.naming.factory.initial = org.apache.qpid.jms.jndi.JmsInitialContextFactory In Maven-based projects, the jndi.properties file is placed in the <project-dir>/src/main/resources directory. Using a system property Set the java.naming.factory.initial system property. Example: Setting the JNDI initial context factory using a system property $ java -Djava.naming.factory.initial=org.apache.qpid.jms.jndi.JmsInitialContextFactory ... Using the initial context API Use the JNDI initial context API to set properties programmatically. Example: Setting JNDI properties programmatically Hashtable<Object, Object> env = new Hashtable<>(); env.put("java.naming.factory.initial", "org.apache.qpid.jms.jndi.JmsInitialContextFactory"); InitialContext context = new InitialContext(env); Note that you can use the same API to set the JNDI properties for connection factories, queues, and topics. 4.2. Configuring the connection factory The JMS connection factory is the entry point for creating connections. It uses a connection URI that encodes your application-specific configuration settings. To set the factory name and connection URI, create a property in the format below. You can store this configuration in a jndi.properties file or set the corresponding system property. The JNDI property format for connection factories connectionFactory.<lookup-name> = <connection-uri> For example, this is how you might configure a factory named app1: Example: Setting the connection factory in a jndi.properties file connectionFactory.app1 = amqp://example.net:5672?jms.clientID=backend You can then use the JNDI context to look up your configured connection factory using the name app1: ConnectionFactory factory = (ConnectionFactory) context.lookup("app1"); 4.3. Connection URIs Connections are configured using a connection URI. The connection URI specifies the remote host, port, and a set of configuration options, which are set as query parameters. For more information about the available options, see Chapter 5, Configuration options. The connection URI format The scheme is amqp for unencrypted connections and amqps for SSL/TLS connections.
For example, the following is a connection URI that connects to host example.net at port 5672 and sets the client ID to backend: Example: A connection URI Failover URIs When failover is configured, the client can reconnect to another server automatically if the connection to the current server is lost. Failover URIs have the prefix failover: and contain a comma-separated list of connection URIs inside parentheses. Additional options are specified at the end. The failover URI format For example, the following is a failover URI that can connect to either of two hosts, host1 or host2: Example: A failover URI As with the connection URI example, the client can be configured with a number of different settings using the URI in a failover configuration. These settings are detailed in Chapter 5, Configuration options, with Section 5.5, "Failover options" being of particular interest. SSL/TLS Server Name Indication When the amqps scheme is used to specify an SSL/TLS connection, the host segment from the URI can be used by the JVM's TLS Server Name Indication (SNI) extension to communicate the desired server hostname during a TLS handshake. The SNI extension is automatically included if a fully qualified domain name (for example, "myhost.mydomain") is specified, but not when an unqualified name (for example, "myhost") or a bare IP address is used. 4.4. Configuring queue and topic names JMS provides the option of using JNDI to look up deployment-specific queue and topic resources. To set queue and topic names in JNDI, create properties in the following format. Either place this configuration in a jndi.properties file or set corresponding system properties. The JNDI property format for queues and topics queue.<lookup-name> = <queue-name> topic.<lookup-name> = <topic-name> For example, the following properties define the names jobs and notifications for two deployment-specific resources: Example: Setting queue and topic names in a jndi.properties file queue.jobs = app1/work-items topic.notifications = app1/updates You can then look up the resources by their JNDI names: Queue queue = (Queue) context.lookup("jobs"); Topic topic = (Topic) context.lookup("notifications"); 4.5. Variable expansion in JNDI properties JNDI property values can contain variables of the form ${<variable-name>}. The library resolves the variable value by searching in order in the following locations: Java system properties, OS environment variables, and the JNDI properties file or environment Hashtable. For example, on Linux, ${HOME} resolves to the HOME environment variable, the current user's home directory. A default value can be supplied using the syntax ${<variable-name>:-<default-value>}. If no value for <variable-name> is found, the default value is used instead.
[ "javax.naming.Context context = new javax.naming.InitialContext();", "java.naming.factory.initial = org.apache.qpid.jms.jndi.JmsInitialContextFactory", "java -Djava.naming.factory.initial=org.apache.qpid.jms.jndi.JmsInitialContextFactory", "Hashtable<Object, Object> env = new Hashtable<>(); env.put(\"java.naming.factory.initial\", \"org.apache.qpid.jms.jndi.JmsInitialContextFactory\"); InitialContext context = new InitialContext(env);", "connectionFactory. <lookup-name> = <connection-uri>", "connectionFactory.app1 = amqp://example.net:5672?jms.clientID=backend", "ConnectionFactory factory = (ConnectionFactory) context.lookup(\"app1\");", "<scheme>://<host>:<port>[?<option>=<value>[&<option>=<value>...]]", "amqp://example.net:5672?jms.clientID=backend", "failover:(<connection-uri>[,<connection-uri>...])[?<option>=<value>[&<option>=<value>...]]", "failover:(amqp://host1:5672,amqp://host2:5672)?jms.clientID=backend", "queue. <lookup-name> = <queue-name> topic. <lookup-name> = <topic-name>", "queue.jobs = app1/work-items topic.notifications = app1/updates", "Queue queue = (Queue) context.lookup(\"jobs\"); Topic topic = (Topic) context.lookup(\"notifications\");" ]
https://docs.redhat.com/en/documentation/red_hat_amq/2021.q3/html/using_the_amq_jms_client/configuration
Chapter 17. Impersonating the system:admin user
Chapter 17. Impersonating the system:admin user 17.1. API impersonation You can configure a request to the OpenShift Container Platform API to act as though it originated from another user. For more information, see User impersonation in the Kubernetes documentation. 17.2. Impersonating the system:admin user You can grant a user permission to impersonate system:admin, which grants them cluster administrator permissions. Procedure To grant a user permission to impersonate system:admin, run the following command: $ oc create clusterrolebinding <any_valid_name> --clusterrole=sudoer --user=<username> Tip You can alternatively apply the following YAML to grant permission to impersonate system:admin: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: <any_valid_name> roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: sudoer subjects: - apiGroup: rbac.authorization.k8s.io kind: User name: <username> 17.3. Impersonating the system:admin group When a system:admin user is granted cluster administration permissions through a group, you must include the --as=<user> --as-group=<group1> --as-group=<group2> parameters in the command to impersonate the associated groups. Procedure To grant a user permission to impersonate a system:admin by impersonating the associated cluster administration groups, run the following command: $ oc create clusterrolebinding <any_valid_name> --clusterrole=sudoer --as=<user> \ --as-group=<group1> --as-group=<group2>
[ "oc create clusterrolebinding <any_valid_name> --clusterrole=sudoer --user=<username>", "apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: <any_valid_name> roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: sudoer subjects: - apiGroup: rbac.authorization.k8s.io kind: User name: <username>", "oc create clusterrolebinding <any_valid_name> --clusterrole=sudoer --as=<user> --as-group=<group1> --as-group=<group2>" ]
https://docs.redhat.com/en/documentation/openshift_container_platform/4.15/html/authentication_and_authorization/impersonating-system-admin
Chapter 78. Obtaining an IdM certificate for a service using certmonger
Chapter 78. Obtaining an IdM certificate for a service using certmonger 78.1. Certmonger overview When Identity Management (IdM) is installed with an integrated IdM Certificate Authority (CA), it uses the certmonger service to track and renew system and service certificates. When the certificate is reaching its expiration date, certmonger manages the renewal process by: Regenerating a certificate-signing request (CSR) using the options provided in the original request. Submitting the CSR to the IdM CA using the IdM API cert-request command. Receiving the certificate from the IdM CA. Executing a pre-save command if specified by the original request. Installing the new certificate in the location specified in the renewal request: either in an NSS database or in a file. Executing a post-save command if specified by the original request. For example, the post-save command can instruct certmonger to restart a relevant service, so that the service picks up the new certificate. Types of certificates certmonger tracks Certificates can be divided into system and service certificates. Unlike service certificates (for example, for HTTP , LDAP and PKINIT ), which have different keypairs and subject names on different servers, IdM system certificates and their keys are shared by all CA replicas. The IdM system certificates include: IdM CA certificate OCSP signing certificate IdM CA subsystem certificates IdM CA audit signing certificate IdM renewal agent (RA) certificate KRA transport and storage certificates The certmonger service tracks the IdM system and service certificates that were requested during the installation of IdM environment with an integrated CA. Certmonger also tracks certificates that have been requested manually by the system administrator for other services running on the IdM host. Certmonger does not track external CA certificates or user certificates. Certmonger components The certmonger service consists of two main components: The certmonger daemon , which is the engine tracking the list of certificates and launching renewal commands The getcert utility for the command-line interface (CLI), which allows the system administrator to actively send commands to the certmonger daemon. More specifically, the system administrator can use the getcert utility to: Request a new certificate View the list of certificates that certmonger tracks Start or stop tracking a certificate Renew a certificate 78.2. Obtaining an IdM certificate for a service using certmonger To ensure that communication between browsers and the web service running on your Identity Management (IdM) client is secure and encrypted, use a TLS certificate. Obtain the TLS certificate for your web service from the IdM Certificate Authority (CA). Follow this procedure to use certmonger to obtain an IdM certificate for a service ( HTTP/my_company.idm.example.com @ IDM.EXAMPLE.COM ) running on an IdM client. Using certmonger to request the certificate automatically means that certmonger manages and renews the certificate when it is due for a renewal. For a visual representation of what happens when certmonger requests a service certificate, see Communication flow for certmonger requesting a service certificate . Prerequisites The web server is enrolled as an IdM client. You have root access to the IdM client on which you are running the procedure. The service for which you are requesting a certificate does not have to pre-exist in IdM. 
Procedure On the my_company.idm.example.com IdM client on which the HTTP service is running, request a certificate for the service corresponding to the HTTP/[email protected] principal, and specify that The certificate is to be stored in the local /etc/pki/tls/certs/httpd.pem file The private key is to be stored in the local /etc/pki/tls/private/httpd.key file That an extensionRequest for a SubjectAltName be added to the signing request with the DNS name of my_company.idm.example.com : In the command above: The ipa-getcert request command specifies that the certificate is to be obtained from the IdM CA. The ipa-getcert request command is a shortcut for getcert request -c IPA . The -g option specifies the size of key to be generated if one is not already in place. The -D option specifies the SubjectAltName DNS value to be added to the request. The -C option instructs certmonger to restart the httpd service after obtaining the certificate. To specify that the certificate be issued with a particular profile, use the -T option. To request a certificate using the named issuer from the specified CA, use the -X ISSUER option. Note RHEL 8 uses a different SSL module in Apache than the one used in RHEL 7. The SSL module relies on OpenSSL rather than NSS. For this reason, in RHEL 8 you cannot use an NSS database to store the HTTPS certificate and the private key. Optional: To check the status of your request: The output shows that the request is in the MONITORING status, which means that a certificate has been obtained. The locations of the key pair and the certificate are those requested. 78.3. Communication flow for certmonger requesting a service certificate These diagrams show the stages of what happens when certmonger requests a service certificate from Identity Management (IdM) certificate authority (CA) server. The sequence consists of these diagrams: Unencrypted communication Certmonger requesting a service certificate IdM CA issuing the service certificate Certmonger applying the service certificate Certmonger requesting a new certificate when the old one is nearing expiration Unencrypted communication shows the initial situation: without an HTTPS certificate, the communication between the web server and the browser is unencrypted. Figure 78.1. Unencrypted communication Certmonger requesting a service certificate shows the system administrator using certmonger to manually request an HTTPS certificate for the Apache web server. Note that when requesting a web server certificate, certmonger does not communicate directly with the CA. It proxies through IdM. Figure 78.2. Certmonger requesting a service certificate IdM CA issuing the service certificate shows an IdM CA issuing an HTTPS certificate for the web server. Figure 78.3. IdM CA issuing the service certificate Certmonger applying the service certificate shows certmonger placing the HTTPS certificate in appropriate locations on the IdM client and, if instructed to do so, restarting the httpd service. The Apache server subsequently uses the HTTPS certificate to encrypt the traffic between itself and the browser. Figure 78.4. Certmonger applying the service certificate Certmonger requesting a new certificate when the old one is nearing expiration shows certmonger automatically requesting a renewal of the service certificate from the IdM CA before the expiration of the certificate. The IdM CA issues a new certificate. Figure 78.5. Certmonger requesting a new certificate when the old one is nearing expiration 78.4. 
Viewing the details of a certificate request tracked by certmonger The certmonger service monitors certificate requests. When a request for a certificate is successfully signed, it results in a certificate. Certmonger manages certificate requests including the resulting certificates. Follow this procedure to view the details of a particular certificate request managed by certmonger . Procedure If you know how to specify the certificate request, list the details of only that particular certificate request. You can, for example, specify: The request ID The location of the certificate The certificate nickname For example, to view the details of the certificate whose request ID is 20190408143846, using the -v option to view all the details of errors in case your request for a certificate was unsuccessful: The output displays several pieces of information about the certificate, for example: the certificate location; in the example above, it is the NSS database in the /etc/dirsrv/slapd-IDM-EXAMPLE-COM directory the certificate nickname; in the example above, it is Server-Cert the file storing the pin; in the example above, it is /etc/dirsrv/slapd-IDM-EXAMPLE-COM/pwdfile.txt the Certificate Authority (CA) that will be used to renew the certificate; in the example above, it is the IPA CA the expiration date; in the example above, it is 2021-04-08 16:38:47 CEST the status of the certificate; in the example above, the MONITORING status means that the certificate is valid and it is being tracked the post-save command; in the example above, it is the restart of the LDAP service If you do not know how to specify the certificate request, list the details of all the certificates that certmonger is monitoring or attempting to obtain: Additional resources See the getcert list man page on your system. 78.5. Starting and stopping certificate tracking Follow this procedure to use the getcert stop-tracking and getcert start-tracking commands to monitor certificates. The two commands are provided by the certmonger service. Enabling certificate tracking is especially useful if you have imported a certificate issued by the Identity Management (IdM) certificate authority (CA) onto the machine from a different IdM client. Enabling certificate tracking can also be the final step of the following provisioning scenario: On the IdM server, you create a certificate for a system that does not exist yet. You create the new system. You enroll the new system as an IdM client. You import the certificate and the key from the IdM server on to the IdM client. You start tracking the certificate using certmonger to ensure that it gets renewed when it is due to expire. Procedure To disable the monitoring of a certificate with the Request ID of 20190408143846: For more options, see the getcert stop-tracking man page on your system. To enable the monitoring of a certificate stored in the /tmp/some_cert.crt file, whose private key is stored in the /tmp/some_key.key file: Certmonger cannot automatically identify the CA type that issued the certificate. For this reason, add the -c option with the IPA value to the getcert start-tracking command if the certificate was issued by the IdM CA. Omitting to add the -c option results in certmonger entering the NEED_CA state. For more options, see the getcert start-tracking man page on your system. Note The two commands do not manipulate the certificate. 
For example, getcert stop-tracking does not delete the certificate or remove it from the NSS database or from the filesystem but simply removes the certificate from the list of monitored certificates. Similarly, getcert start-tracking only adds a certificate to the list of monitored certificates. 78.6. Renewing a certificate manually When a certificate is near its expiration date, the certmonger daemon automatically issues a renewal command using the certificate authority (CA) helper, obtains a renewed certificate and replaces the certificate with the new one. You can also manually renew a certificate in advance by using the getcert resubmit command. This way, you can update the information the certificate contains, for example, by adding a Subject Alternative Name (SAN). Follow this procedure to renew a certificate manually. Procedure To renew a certificate with the Request ID of 20190408143846: To obtain the Request ID for a specific certificate, use the getcert list command. For details, see the getcert list man page on your system. 78.7. Making certmonger resume tracking of IdM certificates on a CA replica This procedure shows how to make certmonger resume the tracking of Identity Management (IdM) system certificates that are crucial for an IdM deployment with an integrated certificate authority after the tracking of certificates was interrupted. The interruption may have been caused by the IdM host being unenrolled from IdM during the renewal of the system certificates or by replication topology not working properly. The procedure also shows how to make certmonger resume the tracking of the IdM service certificates, namely the HTTP , LDAP and PKINIT certificates. Prerequisites The host on which you want to resume tracking system certificates is an IdM server that is also an IdM certificate authority (CA) but not the IdM CA renewal server. Procedure Get the PIN for the subsystem CA certificates: Add tracking to the subsystem CA certificates, replacing [internal PIN] in the commands below with the PIN obtained in the step: Add tracking for the remaining IdM certificates, the HTTP , LDAP , IPA renewal agent and PKINIT certificates: Restart certmonger : Wait for one minute after certmonger has started and then check the statuses of the new certificates: Note the following: If your IdM system certificates have all expired, see the Red Hat Knowledgebase solution How do I manually renew Identity Management (IPA) certificates on RHEL7/RHEL 8 after they have expired? to manually renew IdM system certificates on the IdM CA server that is also the CA renewal server and the CRL publisher server. Follow the procedure described in the Red Hat Knowledgebase solution How do I manually renew Identity Management (IPA) certificates on RHEL7 after they have expired? to manually renew IdM system certificates on all the other CA servers in the topology. 78.8. Using SCEP with certmonger The Simple Certificate Enrollment Protocol (SCEP) is a certificate management protocol that you can use across different devices and operating systems. If you are using a SCEP server as an external certificate authority (CA) in your environment, you can use certmonger to obtain a certificate for an Identity Management (IdM) client. 78.8.1. SCEP overview The Simple Certificate Enrollment Protocol (SCEP) is a certificate management protocol that you can use across different devices and operating systems. You can use a SCEP server as an external certificate authority (CA). 
You can configure an Identity Management (IdM) client to request and retrieve a certificate over HTTP directly from the CA SCEP service. This process is secured by a shared secret that is usually valid only for a limited time. On the client side, SCEP requires you to provide the following components: SCEP URL: the URL of the CA SCEP interface. SCEP shared secret: a challengePassword PIN shared between the CA and the SCEP client, used to obtain the certificate. The client then retrieves the CA certificate chain over SCEP and sends a certificate signing request to the CA. When configuring SCEP with certmonger , you create a new CA configuration profile that specifies the issued certificate parameters. 78.8.2. Requesting an IdM CA-signed certificate through SCEP The following example adds a SCEP_example SCEP CA configuration to certmonger and requests a new certificate on the client.idm.example.com IdM client. certmonger supports both the NSS certificate database format and file-based (PEM) formats, such as OpenSSL. Prerequisites You know the SCEP URL. You have the challengePassword PIN shared secret. Procedure Add the CA configuration to certmonger : -c : Mandatory nickname for the CA configuration. The same value can later be used with other getcert commands. -u : URL of the server's SCEP interface. Important When using an HTTPS URL, you must also specify the location of the PEM-formatted copy of the SCEP server CA certificate using the -R option. Verify that the CA configuration has been successfully added: If the configuration was successfully added, certmonger retrieves the CA chain from the remote CA. The CA chain then appears as thumbprints in the command output. When accessing the server over unencrypted HTTP, manually compare the thumbprints with the ones displayed at the SCEP server to prevent a man-in-the-middle attack. Request a certificate from the CA: If you are using NSS: You can use the options to specify the following parameters of the certificate request: -I : (Optional) Name of the task: the tracking ID for the request. The same value can later be used with the getcert list command. -c : CA configuration to submit the request to. -d : Directory with the NSS database to store the certificate and key. -n : Nickname of the certificate, used in the NSS database. -N : Subject name in the CSR. -L : Time-limited one-time challengePassword PIN issued by the CA. -D : Subject Alternative Name for the certificate, usually the same as the host name. If you are using OpenSSL: You can use the options to specify the following parameters of the certificate request: -I : (Optional) Name of the task: the tracking ID for the request. The same value can later be used with the getcert list command. -c : CA configuration to submit the request to. -f : Storage path to the certificate. -k : Storage path to the key. -N : Subject name in the CSR. -L : Time-limited one-time challengePassword PIN issued by the CA. -D : Subject Alternative Name for the certificate, usually the same as the host name. Verification Verify that a certificate was issued and correctly stored in the local database: If you used NSS, enter: If you used OpenSSL, enter: The status MONITORING signifies a successful retrieval of the issued certificate. The getcert-list(1) man page lists other possible states and their meanings. Additional resources For more options when requesting a certificate, see the getcert-request(1) man page on your system. 78.8.3. 
Automatically renewing AD SCEP certificates with certmonger When certmonger sends a SCEP certificate renewal request, this request is signed using the existing certificate private key. However, renewal requests sent by certmonger by default also include the challengePassword PIN that was used to originally obtain the certificates. An Active Directory (AD) Network Device Enrollment Service (NDES) server that works as the SCEP server automatically rejects any requests for renewal that contain the original challengePassword PIN. Consequently, the renewal fails. For renewal with AD to work, you need to configure certmonger to send the signed renewal requests without the challengePassword PIN. You also need to configure the AD server so that it does not compare the subject name at renewal. Note There may be SCEP servers other than AD that also refuse requests containing the challengePassword . In those cases, you may also need to change the certmonger configuration in this way. Prerequisites The RHEL server has to be running RHEL 8.6 or newer. Procedure Open regedit on the AD server. In the HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Cryptography\MSCEP subkey, add a new 32-bit REG_DWORD entry DisableRenewalSubjectNameMatch and set its value to 1 . On the server where certmonger is running, open the /etc/certmonger/certmonger.conf file and add the following section: Restart certmonger:
[ "ipa-getcert request -K HTTP/my_company.idm.example.com -k /etc/pki/tls/private/httpd.key -f /etc/pki/tls/certs/httpd.pem -g 2048 -D my_company.idm.example.com -C \"systemctl restart httpd\" New signing request \"20190604065735\" added.", "ipa-getcert list -f /etc/pki/tls/certs/httpd.pem Number of certificates and requests being tracked: 3. Request ID '20190604065735': status: MONITORING stuck: no key pair storage: type=FILE,location='/etc/pki/tls/private/httpd.key' certificate: type=FILE,location='/etc/pki/tls/certs/httpd.crt' CA: IPA [...]", "getcert list -i 20190408143846 -v Number of certificates and requests being tracked: 16. Request ID '20190408143846': status: MONITORING stuck: no key pair storage: type=NSSDB,location='/etc/dirsrv/slapd-IDM-EXAMPLE-COM',nickname='Server-Cert',token='NSS Certificate DB',pinfile='/etc/dirsrv/slapd-IDM-EXAMPLE-COM/pwdfile.txt' certificate: type=NSSDB,location='/etc/dirsrv/slapd-IDM-EXAMPLE-COM',nickname='Server-Cert',token='NSS Certificate DB' CA: IPA issuer: CN=Certificate Authority,O=IDM.EXAMPLE.COM subject: CN=r8server.idm.example.com,O=IDM.EXAMPLE.COM expires: 2021-04-08 16:38:47 CEST dns: r8server.idm.example.com principal name: ldap/[email protected] key usage: digitalSignature,nonRepudiation,keyEncipherment,dataEncipherment eku: id-kp-serverAuth,id-kp-clientAuth pre-save command: post-save command: /usr/libexec/ipa/certmonger/restart_dirsrv IDM-EXAMPLE-COM track: true auto-renew: true", "getcert list", "getcert stop-tracking -i 20190408143846", "getcert start-tracking -c IPA -f /tmp/some_cert.crt -k /tmp/some_key.key", "getcert resubmit -i 20190408143846", "grep 'internal=' /var/lib/pki/pki-tomcat/conf/password.conf", "getcert start-tracking -d /etc/pki/pki-tomcat/alias -n \"caSigningCert cert-pki-ca\" -c 'dogtag-ipa-ca-renew-agent' -P [internal PIN] -B /usr/libexec/ipa/certmonger/stop_pkicad -C '/usr/libexec/ipa/certmonger/renew_ca_cert \"caSigningCert cert-pki-ca\"' -T caCACert getcert start-tracking -d /etc/pki/pki-tomcat/alias -n \"auditSigningCert cert-pki-ca\" -c 'dogtag-ipa-ca-renew-agent' -P [internal PIN] -B /usr/libexec/ipa/certmonger/stop_pkicad -C '/usr/libexec/ipa/certmonger/renew_ca_cert \"auditSigningCert cert-pki-ca\"' -T caSignedLogCert getcert start-tracking -d /etc/pki/pki-tomcat/alias -n \"ocspSigningCert cert-pki-ca\" -c 'dogtag-ipa-ca-renew-agent' -P [internal PIN] -B /usr/libexec/ipa/certmonger/stop_pkicad -C '/usr/libexec/ipa/certmonger/renew_ca_cert \"ocspSigningCert cert-pki-ca\"' -T caOCSPCert getcert start-tracking -d /etc/pki/pki-tomcat/alias -n \"subsystemCert cert-pki-ca\" -c 'dogtag-ipa-ca-renew-agent' -P [internal PIN] -B /usr/libexec/ipa/certmonger/stop_pkicad -C '/usr/libexec/ipa/certmonger/renew_ca_cert \"subsystemCert cert-pki-ca\"' -T caSubsystemCert getcert start-tracking -d /etc/pki/pki-tomcat/alias -n \"Server-Cert cert-pki-ca\" -c 'dogtag-ipa-ca-renew-agent' -P [internal PIN] -B /usr/libexec/ipa/certmonger/stop_pkicad -C '/usr/libexec/ipa/certmonger/renew_ca_cert \"Server-Cert cert-pki-ca\"' -T caServerCert", "getcert start-tracking -f /var/lib/ipa/certs/httpd.crt -k /var/lib/ipa/private/httpd.key -p /var/lib/ipa/passwds/idm.example.com-443-RSA -c IPA -C /usr/libexec/ipa/certmonger/restart_httpd -T caIPAserviceCert getcert start-tracking -d /etc/dirsrv/slapd-IDM-EXAMPLE-COM -n \"Server-Cert\" -c IPA -p /etc/dirsrv/slapd-IDM-EXAMPLE-COM/pwdfile.txt -C '/usr/libexec/ipa/certmonger/restart_dirsrv \"IDM-EXAMPLE-COM\"' -T caIPAserviceCert getcert start-tracking -f /var/lib/ipa/ra-agent.pem -k 
/var/lib/ipa/ra-agent.key -c dogtag-ipa-ca-renew-agent -B /usr/libexec/ipa/certmonger/renew_ra_cert_pre -C /usr/libexec/ipa/certmonger/renew_ra_cert -T caSubsystemCert getcert start-tracking -f /var/kerberos/krb5kdc/kdc.crt -k /var/kerberos/krb5kdc/kdc.key -c dogtag-ipa-ca-renew-agent -B /usr/libexec/ipa/certmonger/renew_ra_cert_pre -C /usr/libexec/ipa/certmonger/renew_kdc_cert -T KDCs_PKINIT_Certs", "systemctl restart certmonger", "getcert list", "getcert add-scep-ca -c SCEP_example -u SCEP_URL", "getcert list-cas -c SCEP_example CA 'SCEP_example': is-default: no ca-type: EXTERNAL helper-location: /usr/libexec/certmonger/scep-submit -u http://SCEP_server_enrollment_interface_URL SCEP CA certificate thumbprint (MD5): A67C2D4B 771AC186 FCCA654A 5E55AAF7 SCEP CA certificate thumbprint (SHA1): FBFF096C 6455E8E9 BD55F4A5 5787C43F 1F512279", "getcert request -I Example_Task -c SCEP_example -d /etc/pki/nssdb -n ExampleCert -N cn=\" client.idm.example.com \" -L one-time_PIN -D client.idm.example.com", "getcert request -I Example_Task -c SCEP_example -f /etc/pki/tls/certs/server.crt -k /etc/pki/tls/private/private.key -N cn=\" client.idm.example.com \" -L one-time_PIN -D client.idm.example.com", "getcert list -I Example_Task Request ID 'Example_Task': status: MONITORING stuck: no key pair storage: type=NSSDB,location='/etc/pki/nssdb',nickname='ExampleCert',token='NSS Certificate DB' certificate: type=NSSDB,location='/etc/pki/nssdb',nickname='ExampleCert',token='NSS Certificate DB' signing request thumbprint (MD5): 503A8EDD DE2BE17E 5BAA3A57 D68C9C1B signing request thumbprint (SHA1): B411ECE4 D45B883A 75A6F14D 7E3037F1 D53625F4 CA: IPA issuer: CN=Certificate Authority,O=EXAMPLE.COM subject: CN=client.idm.example.com,O=EXAMPLE.COM expires: 2018-05-06 10:28:06 UTC key usage: digitalSignature,keyEncipherment eku: iso.org.dod.internet.security.mechanisms.8.2.2 certificate template/profile: IPSECIntermediateOffline pre-save command: post-save command: track: true auto-renew: true", "getcert list -I Example_Task Request ID 'Example_Task': status: MONITORING stuck: no key pair storage: type=FILE,location='/etc/pki/tls/private/private.key' certificate: type=FILE,location='/etc/pki/tls/certs/server.crt' CA: IPA issuer: CN=Certificate Authority,O=EXAMPLE.COM subject: CN=client.idm.example.com,O=EXAMPLE.COM expires: 2018-05-06 10:28:06 UTC eku: id-kp-serverAuth,id-kp-clientAuth pre-save command: post-save command: track: true auto-renew: true", "[scep] challenge_password_otp = yes", "systemctl restart certmonger" ]
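When many certificates are tracked, the full getcert list output can be long. One possible way to get a quick overview of the request IDs, states, and expiry dates is to filter the output with grep; the filter is a suggestion and not part of the documented procedure:

# Show only the identifying and status fields for every tracked certificate
getcert list | grep -E "Request ID|status:|expires:"

The field names in the pattern match the labels shown in the example getcert list output above.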
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/8/html/configuring_and_managing_identity_management/using-certmonger_configuring-and-managing-idm