Dataset columns: title (string, 4-168 characters), content (string, 7-1.74M characters), commands (sequence, 1-5.62k items), url (string, 79-342 characters)
Working with DNS in Identity Management
Working with DNS in Identity Management Red Hat Enterprise Linux 9 Managing the IdM-integrated DNS service Red Hat Customer Content Services
[ "Generated by NetworkManager search idm.example.com nameserver 127.0.0.1", "auto-generated by IPA installer [main] dns=default [global-dns] searches=USDDOMAIN [global-dns-domain-*] servers=127.0.0.1", "cd /usr/share/doc/ansible-freeipa/playbooks/dnsconfig", "[ipaserver] server.idm.example.com", "cp forwarders-absent.yml ensure-presence-of-a-global-forwarder.yml", "--- - name: Playbook to ensure the presence of a global forwarder in IdM DNS hosts: ipaserver vars_files: - /home/user_name/MyPlaybooks/secret.yml tasks: - name: Ensure the presence of a DNS global forwarder to 7.7.9.9 and 2001:db8::1:0 on port 53 ipadnsconfig: forwarders: - ip_address: 7.7.9.9 - ip_address: 2001:db8::1:0 port: 53 state: present", "ansible-playbook --vault-password-file=password_file -v -i inventory.file ensure-presence-of-a-global-forwarder.yml", "cd /usr/share/doc/ansible-freeipa/playbooks/dnsconfig", "[ipaserver] server.idm.example.com", "cp forwarders-absent.yml ensure-absence-of-a-global-forwarder.yml", "--- - name: Playbook to ensure the absence of a global forwarder in IdM DNS hosts: ipaserver vars_files: - /home/user_name/MyPlaybooks/secret.yml tasks: - name: Ensure the absence of a DNS global forwarder to 8.8.6.6 and 2001:4860:4860::8800 on port 53 ipadnsconfig: forwarders: - ip_address: 8.8.6.6 - ip_address: 2001:4860:4860::8800 port: 53 action: member state: absent", "ansible-playbook --vault-password-file=password_file -v -i inventory.file ensure-absence-of-a-global-forwarder.yml", "[...] tasks: - name: Ensure the presence of DNS global forwarder 8.8.6.7 ipadnsconfig: forwarders: - ip_address: 8.8.6.7 state: present", "[...] tasks: - name: Ensure the presence of DNS global forwarder 8.8.6.7 ipadnsconfig: forwarders: - ip_address: 8.8.6.7 action: member state: present", "[...] tasks: - name: Ensure the absence of DNS global forwarder 8.8.6.7 ipadnsconfig: forwarders: - ip_address: 8.8.6.7 state: absent", "[...] tasks: - name: Ensure the absence of DNS global forwarder 8.8.6.7 ipadnsconfig: forwarders: - ip_address: 8.8.6.7 action: member state: absent", "cd /usr/share/doc/ansible-freeipa/playbooks/dnsconfig", "[ipaserver] server.idm.example.com", "cp set-configuration.yml set-forward-policy-to-first.yml", "--- - name: Playbook to set global forwarding policy to first hosts: ipaserver become: true tasks: - name: Set global forwarding policy to first. ipadnsconfig: ipaadmin_password: \"{{ ipaadmin_password }}\" forward_policy: first", "ansible-playbook --vault-password-file=password_file -v -i inventory.file set-forward-policy-to-first.yml", "cd /usr/share/doc/ansible-freeipa/playbooks/dnsconfig", "[ipaserver] server.idm.example.com", "cp disable-global-forwarders.yml disable-global-forwarders-copy.yml", "--- - name: Playbook to disable global DNS forwarders hosts: ipaserver become: true tasks: - name: Disable global forwarders. ipadnsconfig: ipaadmin_password: \"{{ ipaadmin_password }}\" forward_policy: none", "ansible-playbook --vault-password-file=password_file -v -i inventory.file disable-global-forwarders-copy.yml", "cd /usr/share/doc/ansible-freeipa/playbooks/dnsconfig", "[ipaserver] server.idm.example.com", "cp disallow-reverse-sync.yml disallow-reverse-sync-copy.yml", "--- - name: Playbook to disallow reverse record synchronization hosts: ipaserver become: true tasks: - name: Disallow reverse record synchronization. 
ipadnsconfig: ipaadmin_password: \"{{ ipaadmin_password }}\" allow_sync_ptr: no", "ansible-playbook --vault-password-file=password_file -v -i inventory.file disallow-reverse-sync-copy.yml", "ipa dnszone-add newzone.idm.example.com", "ipa dnszone-del idm.example.com", "ipa dnszone-mod --retry 1800", "ipa dnszone-mod --allow-transfer=192.0.2.1;198.51.100.1;203.0.113.1 idm.example.com", "ssh 192.0.2.1", "dig @ipa-server zone_name AXFR", "cd /usr/share/doc/ansible-freeipa/playbooks/dnszone", "[ipaserver] server.idm.example.com", "cp dnszone-present.yml dnszone-present-copy.yml", "--- - name: Ensure dnszone present hosts: ipaserver become: true tasks: - name: Ensure zone is present. ipadnszone: ipaadmin_password: \"{{ ipaadmin_password }}\" zone_name: zone.idm.example.com state: present", "ansible-playbook --vault-password-file=password_file -v -i inventory.file dnszone-present-copy.yml", "cd /usr/share/doc/ansible-freeipa/playbooks/dnszone", "[ipaserver] server.idm.example.com", "cp dnszone-all-params.yml dnszone-all-params-copy.yml", "--- - name: Ensure dnszone present hosts: ipaserver become: true tasks: - name: Ensure zone is present. ipadnszone: ipaadmin_password: \"{{ ipaadmin_password }}\" zone_name: zone.idm.example.com allow_sync_ptr: true dynamic_update: true dnssec: true allow_transfer: - 1.1.1.1 - 2.2.2.2 allow_query: - 1.1.1.1 - 2.2.2.2 forwarders: - ip_address: 8.8.8.8 - ip_address: 8.8.4.4 port: 52 serial: 1234 refresh: 3600 retry: 900 expire: 1209600 minimum: 3600 ttl: 60 default_ttl: 90 name_server: server.idm.example.com. admin_email: [email protected] nsec3param_rec: \"1 7 100 0123456789abcdef\" skip_overlap_check: true skip_nameserver_check: true state: present", "ansible-playbook --vault-password-file=password_file -v -i inventory.file dnszone-all-params-copy.yml", "cd /usr/share/doc/ansible-freeipa/playbooks/dnszone", "[ipaserver] server.idm.example.com", "cp dnszone-reverse-from-ip.yml dnszone-reverse-from-ip-copy.yml", "--- - name: Ensure dnszone present hosts: ipaserver become: true tasks: - name: Ensure zone for reverse DNS lookup is present. ipadnszone: ipaadmin_password: \"{{ ipaadmin_password }}\" name_from_ip: 192.168.1.2/24 state: present register: result - name: Display inferred zone name. debug: msg: \"Zone name: {{ result.dnszone.name }}\"", "ansible-playbook --vault-password-file=password_file -v -i inventory.file dnszone-reverse-from-ip-copy.yml", "dig -t SRV +short _kerberos._tcp.idm.example.com 0 100 88 idmserver-01.idm.example.com. 0 100 88 idmserver-02.idm.example.com.", "dig -t SRV +short _kerberos._tcp.idm.example.com _kerberos._tcp.germany._locations.idm.example.com. 0 100 88 idmserver-01.idm.example.com. 50 100 88 idmserver-02.idm.example.com.", "ipa location-add germany ---------------------------- Added IPA location \"germany\" ---------------------------- Location name: germany", "systemctl restart named-pkcs11", "ipa location-find ----------------------- 2 IPA locations matched ----------------------- Location name: australia Location name: germany ----------------------------- Number of entries returned: 2 -----------------------------", "ipa server-mod idmserver-01.idm.example.com --location=germany ipa: WARNING: Service named-pkcs11.service requires restart on IPA server idmserver-01.idm.example.com to apply configuration changes. 
-------------------------------------------------- Modified IPA server \"idmserver-01.idm.example.com\" -------------------------------------------------- Servername: idmserver-01.idm.example.com Min domain level: 0 Max domain level: 1 Location: germany Enabled server roles: DNS server, NTP server", "systemctl restart named-pkcs11", "nameserver 10.10.0.1 nameserver 10.10.0.2", "nameserver 10.50.0.1 nameserver 10.50.0.3", "nameserver 10.30.0.1", "nameserver 10.30.0.1", "dig -t SRV +short _kerberos._tcp.idm.example.com 0 100 88 idmserver-01.idm.example.com. 0 100 88 idmserver-02.idm.example.com.", "dig -t SRV +short _kerberos._tcp.idm.example.com _kerberos._tcp.germany._locations.idm.example.com. 0 100 88 idmserver-01.idm.example.com. 50 100 88 idmserver-02.idm.example.com.", "cd ~/MyPlaybooks/", "cp /usr/share/doc/ansible-freeipa/playbooks/location/location-present.yml location-present-copy.yml", "--- - name: location present example hosts: ipaserver vars_files: - /home/user_name/MyPlaybooks/secret.yml tasks: - name: Ensure that the \"germany\" location is present ipalocation: ipaadmin_password: \"{{ ipaadmin_password }}\" name: germany", "ansible-playbook --vault-password-file=password_file -v -i inventory location-present-copy.yml", "cd ~/MyPlaybooks/", "cp /usr/share/doc/ansible-freeipa/playbooks/location/location-absent.yml location-absent-copy.yml", "--- - name: location absent example hosts: ipaserver vars_files: - /home/user_name/MyPlaybooks/secret.yml tasks: - name: Ensure that the \"germany\" location is absent ipalocation: ipaadmin_password: \"{{ ipaadmin_password }}\" name: germany state: absent", "ansible-playbook --vault-password-file=password_file -v -i inventory location-absent-copy.yml", "[user@server ~]$ ipa dnsconfig-mod --forwarder=10.10.0.1 Server will check DNS forwarder(s). This may take some time, please wait Global forwarders: 10.10.0.1 IPA DNS servers: server.example.com", "[user@server ~]$ ipa dnsconfig-show Global forwarders: 10.10.0.1 IPA DNS servers: server.example.com", "[user@server ~]$ ipa dnsforwardzone-add forward.example.com. --forwarder=10.10.0.14 --forwarder=10.10.1.15 --forward-policy=first Zone name: forward.example.com. Zone forwarders: 10.10.0.14, 10.10.1.15 Forward policy: first", "[user@server ~]$ ipa dnsforwardzone-show forward.example.com. Zone name: forward.example.com. 
Zone forwarders: 10.10.0.14, 10.10.1.15 Forward policy: first", "cd /usr/share/doc/ansible-freeipa/playbooks/dnsconfig", "[ipaserver] server.idm.example.com", "cp set-configuration.yml establish-global-forwarder.yml", "--- - name: Playbook to establish a global forwarder in IdM DNS hosts: ipaserver vars_files: - /home/user_name/MyPlaybooks/secret.yml tasks: - name: Create a DNS global forwarder to 8.8.6.6 and 2001:4860:4860::8800 ipadnsconfig: forwarders: - ip_address: 8.8.6.6 - ip_address: 2001:4860:4860::8800 port: 53 forward_policy: first allow_sync_ptr: true", "ansible-playbook --vault-password-file=password_file -v -i inventory.file establish-global-forwarder.yml", "cd /usr/share/doc/ansible-freeipa/playbooks/dnsconfig", "[ipaserver] server.idm.example.com", "cp forwarders-absent.yml ensure-presence-of-a-global-forwarder.yml", "--- - name: Playbook to ensure the presence of a global forwarder in IdM DNS hosts: ipaserver vars_files: - /home/user_name/MyPlaybooks/secret.yml tasks: - name: Ensure the presence of a DNS global forwarder to 7.7.9.9 and 2001:db8::1:0 on port 53 ipadnsconfig: forwarders: - ip_address: 7.7.9.9 - ip_address: 2001:db8::1:0 port: 53 state: present", "ansible-playbook --vault-password-file=password_file -v -i inventory.file ensure-presence-of-a-global-forwarder.yml", "cd /usr/share/doc/ansible-freeipa/playbooks/dnsconfig", "[ipaserver] server.idm.example.com", "cp forwarders-absent.yml ensure-absence-of-a-global-forwarder.yml", "--- - name: Playbook to ensure the absence of a global forwarder in IdM DNS hosts: ipaserver vars_files: - /home/user_name/MyPlaybooks/secret.yml tasks: - name: Ensure the absence of a DNS global forwarder to 8.8.6.6 and 2001:4860:4860::8800 on port 53 ipadnsconfig: forwarders: - ip_address: 8.8.6.6 - ip_address: 2001:4860:4860::8800 port: 53 action: member state: absent", "ansible-playbook --vault-password-file=password_file -v -i inventory.file ensure-absence-of-a-global-forwarder.yml", "cd /usr/share/doc/ansible-freeipa/playbooks/dnsconfig", "[ipaserver] server.idm.example.com", "cat disable-global-forwarders.yml --- - name: Playbook to disable global DNS forwarders hosts: ipaserver vars_files: - /home/user_name/MyPlaybooks/secret.yml tasks: - name: Disable global forwarders. 
ipadnsconfig: forward_policy: none", "ansible-playbook --vault-password-file=password_file -v -i inventory.file disable-global-forwarders.yml", "cd /usr/share/doc/ansible-freeipa/playbooks/dnsconfig", "[ipaserver] server.idm.example.com", "cp forwarders-absent.yml ensure-presence-forwardzone.yml", "- 8.8.8.8", "--- - name: Playbook to ensure the presence of a dnsforwardzone in IdM DNS hosts: ipaserver vars_files: - /home/user_name/MyPlaybooks/secret.yml tasks: - name: Ensure the presence of a dnsforwardzone for example.com to 8.8.8.8 ipadnsforwardzone: ipaadmin_password: \"{{ ipaadmin_password }}\" name: example.com forwarders: - 8.8.8.8 forwardpolicy: first skip_overlap_check: true state: present", "ansible-playbook --vault-password-file=password_file -v -i inventory.file ensure-presence-forwardzone.yml", "cd /usr/share/doc/ansible-freeipa/playbooks/dnsconfig", "[ipaserver] server.idm.example.com", "cp forwarders-absent.yml ensure-presence-multiple-forwarders.yml", "- 8.8.8.8 - 4.4.4.4", "--- - name: name: Playbook to ensure the presence of multiple forwarders in a dnsforwardzone in IdM DNS hosts: ipaserver vars_files: - /home/user_name/MyPlaybooks/secret.yml tasks: - name: Ensure presence of 8.8.8.8 and 4.4.4.4 forwarders in dnsforwardzone for example.com ipadnsforwardzone: ipaadmin_password: \"{{ ipaadmin_password }}\" name: example.com forwarders: - 8.8.8.8 - 4.4.4.4 state: present", "ansible-playbook --vault-password-file=password_file -v -i inventory.file ensure-presence-multiple-forwarders.yml", "cd /usr/share/doc/ansible-freeipa/playbooks/dnsconfig", "[ipaserver] server.idm.example.com", "cp forwarders-absent.yml ensure-disabled-forwardzone.yml", "--- - name: Playbook to ensure a dnsforwardzone is disabled in IdM DNS hosts: ipaserver vars_files: - /home/user_name/MyPlaybooks/secret.yml tasks: - name: Ensure a dnsforwardzone for example.com is disabled ipadnsforwardzone: ipaadmin_password: \"{{ ipaadmin_password }}\" name: example.com state: disabled", "ansible-playbook --vault-password-file=password_file -v -i inventory.file ensure-disabled-forwardzone.yml", "cd /usr/share/doc/ansible-freeipa/playbooks/dnsconfig", "[ipaserver] server.idm.example.com", "cp forwarders-absent.yml ensure-absence-forwardzone.yml", "--- - name: Playbook to ensure the absence of a dnsforwardzone in IdM DNS hosts: ipaserver vars_files: - /home/user_name/MyPlaybooks/secret.yml tasks: - name: Ensure the absence of a dnsforwardzone for example.com ipadnsforwardzone: ipaadmin_password: \"{{ ipaadmin_password }}\" name: example.com state: absent", "ansible-playbook --vault-password-file=password_file -v -i inventory.file ensure-absence-forwardzone.yml", "ipa dnsrecord-add zone_name record_name -- record_type_option=data", "ipa dnsrecord-add idm.example.com host1 --a-rec=192.168.122.123", "ipa dnsrecord-del example.com www --a-rec 192.0.2.1", "cd /usr/share/doc/ansible-freeipa/playbooks/dnsrecord", "[ipaserver] server.idm.example.com", "cp ensure-A-and-AAAA-records-are-present.yml ensure-A-and-AAAA-records-are-present-copy.yml", "--- - name: Ensure A and AAAA records are present hosts: ipaserver become: true gather_facts: false tasks: # Ensure A and AAAA records are present - name: Ensure that 'host1' has A and AAAA records. 
ipadnsrecord: ipaadmin_password: \"{{ ipaadmin_password }}\" zone_name: idm.example.com records: - name: host1 a_ip_address: 192.168.122.123 - name: host1 aaaa_ip_address: ::1", "ansible-playbook --vault-password-file=password_file -v -i inventory.file ensure-A-and-AAAA-records-are-present-copy.yml", "cd /usr/share/doc/ansible-freeipa/playbooks/dnsrecord", "[ipaserver] server.idm.example.com", "cp ensure-dnsrecord-with-reverse-is-present.yml ensure-dnsrecord-with-reverse-is-present-copy.yml", "--- - name: Ensure DNS Record is present. hosts: ipaserver become: true gather_facts: false tasks: # Ensure that dns record is present - ipadnsrecord: ipaadmin_password: \"{{ ipaadmin_password }}\" name: host1 zone_name: idm.example.com ip_address: 192.168.122.45 create_reverse: true state: present", "ansible-playbook --vault-password-file=password_file -v -i inventory.file ensure-dnsrecord-with-reverse-is-present-copy.yml", "cd /usr/share/doc/ansible-freeipa/playbooks/dnsrecord", "[ipaserver] server.idm.example.com", "cp ensure-presence-multiple-records.yml ensure-presence-multiple-records-copy.yml", "--- - name: Test multiple DNS Records are present. hosts: ipaserver become: true gather_facts: false tasks: # Ensure that multiple dns records are present - ipadnsrecord: ipaadmin_password: \"{{ ipaadmin_password }}\" records: - name: host1 zone_name: idm.example.com a_rec: 192.168.122.112 a_rec: 192.168.122.122 - name: host1 zone_name: idm.example.com aaaa_rec: ::1", "ansible-playbook --vault-password-file=password_file -v -i inventory.file ensure-presence-multiple-records-copy.yml", "cd /usr/share/doc/ansible-freeipa/playbooks/dnsrecord", "[ipaserver] server.idm.example.com", "cp ensure-CNAME-record-is-present.yml ensure-CNAME-record-is-present-copy.yml", "--- - name: Ensure that 'www.idm.example.com' and 'ftp.idm.example.com' CNAME records point to 'host03.idm.example.com'. hosts: ipaserver become: true gather_facts: false tasks: - ipadnsrecord: ipaadmin_password: \"{{ ipaadmin_password }}\" zone_name: idm.example.com records: - name: www cname_hostname: host03 - name: ftp cname_hostname: host03", "ansible-playbook --vault-password-file=password_file -v -i inventory.file ensure-CNAME-record-is-present.yml", "cd /usr/share/doc/ansible-freeipa/playbooks/dnsrecord", "[ipaserver] server.idm.example.com", "cp ensure-SRV-record-is-present.yml ensure-SRV-record-is-present-copy.yml", "--- - name: Test multiple DNS Records are present. hosts: ipaserver become: true gather_facts: false tasks: # Ensure a SRV record is present - ipadnsrecord: ipaadmin_password: \"{{ ipaadmin_password }}\" name: _kerberos._udp.idm.example.com srv_rec: '10 50 88 idm.example.com' zone_name: idm.example.com state: present", "ansible-playbook --vault-password-file=password_file -v -i inventory.file ensure-SRV-record-is-present.yml", "kinit admin", "ipa host-add-principal demo.example.com --principal= demo", "[libdefaults] dns_canonicalize_hostname = true" ]
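The listings above cover the commands from this guide; as a small hedged supplement, one common way to verify a record and zone created by those commands, reusing the example zone idm.example.com and host host1 from the listings:
# Display the A record added for host1 in the example zone
ipa dnsrecord-show idm.example.com host1
# Query the IdM DNS service directly to confirm the record resolves
dig +short host1.idm.example.com A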
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/9/html-single/working_with_dns_in_identity_management/index
Chapter 5. Important changes to external kernel parameters
Chapter 5. Important changes to external kernel parameters This chapter provides system administrators with a summary of significant changes in the kernel shipped with Red Hat Enterprise Linux 8.8. These changes include, for example, added or updated proc entries, sysctl and sysfs default values, boot parameters, kernel configuration options, or any noticeable behavior changes. New kernel parameters nomodeset With this kernel parameter, you can disable kernel mode setting. DRM drivers will not perform display-mode changes or accelerated rendering. Only the system frame buffer will be available for use if this was set up by the firmware or boot loader. nomodeset is useful as a fallback, or for testing and debugging. sev=option[,option... ] [X86-64] For more information, see Documentation/x86/x86_64/boot-options.rst. amd_pstate=[X86] disable: Do not enable amd_pstate as the default scaling driver for the supported processors. passive: Use amd_pstate as a scaling driver. The driver requests a desired performance on this abstract scale and the power management firmware translates the requests into actual hardware states, such as core frequency, data fabric and memory clocks, and so on. retbleed=ibpb,nosmt This parameter is similar to ibpb and is an alternative for systems that do not have STIBP. With this parameter, you can disable SMT when STIBP is not available. Updated kernel parameters amd_iommu=[HW,X86-64] With this kernel parameter, you can pass parameters to the AMD IOMMU driver in the system. Possible values are: fullflush: Deprecated, equivalent to iommu.strict=1. off: Do not initialize any AMD IOMMU found in the system. force_isolation: Force device isolation for all devices. The IOMMU driver is no longer allowed to lift isolation requirements as needed. This option does not override iommu=pt. force_enable: Force enable the IOMMU on platforms known to be buggy with the IOMMU enabled. Use this option with care. crashkernel=size[KMG][@offset[KMG]] [KNL] Using kexec, Linux can switch to a crash kernel upon panic. This parameter reserves the physical memory region [offset, offset + size] for that kernel image. If @offset is omitted, then a suitable offset is selected automatically. [KNL, X86-64, ARM64] Select a region under 4G first, and fall back to reserving a region above 4G when @offset has not been specified. For more details, see Documentation/admin-guide/kdump/kdump.rst. crashkernel=size[KMG],low [KNL, X86-64, ARM64] With this parameter, you can specify a low range under 4G for the second kernel. When crashkernel=X,high is passed, a certain amount of low memory is required; for example, swiotlb requires at least 64M+32K of low memory, and enough extra low memory is needed to make sure that DMA buffers for 32-bit devices do not run out. The kernel tries to allocate a default amount of memory below 4G automatically. The default size is platform dependent. x86: max(swiotlb_size_or_default() + 8MiB, 256MiB) arm64: 128MiB 0: to disable low allocation. This parameter will be ignored when crashkernel=X,high is not used or the memory reserved is below 4G. [KNL, ARM64] With this parameter, you can specify a low range in the DMA zone for the crash dump kernel. This parameter will be ignored when crashkernel=X,high is not used. intel_iommu=[DMAR] The kernel parameter for setting the Intel IOMMU driver (DMAR) option. on: Enable the Intel IOMMU driver. off: Disable the Intel IOMMU driver. igfx_off [Default Off]: By default, gfx is mapped as a normal device. 
If a gfx device has a dedicated DMAR unit, the DMAR unit is bypassed by not enabling DMAR with this option. In this case, the gfx device will use physical addresses for DMA. strict [Default Off]: Deprecated, equivalent to iommu.strict=1. sp_off [Default Off]: By default, super page will be supported if the Intel IOMMU has the capability. With this option, super page will not be supported. sm_on [Default Off]: By default, scalable mode will be disabled even if the hardware advertises that it has support for the scalable mode translation. With this option set, scalable mode will be used on hardware that claims to support it. tboot_noforce [Default Off]: Do not force the Intel IOMMU to be enabled under tboot. By default, tboot forces the Intel IOMMU on, which could harm the performance of some high-throughput devices like 40GBit network cards, even if identity mapping is enabled. Note Using this option lowers the security provided by tboot because it makes the system vulnerable to DMA attacks. iommu.strict=[ARM64,X86] With this kernel parameter, you can configure TLB invalidation behavior. Format: { "0" | "1" } 0 - Lazy mode. Request that DMA unmap operations use deferred invalidation of hardware TLBs, for increased throughput at the cost of reduced device isolation. Will fall back to strict mode if not supported by the relevant IOMMU driver. 1 - Strict mode. DMA unmap operations invalidate IOMMU hardware TLBs synchronously. unset - Use the value of CONFIG_IOMMU_DEFAULT_DMA_{LAZY,STRICT}. Note On x86, strict mode specified via one of the legacy driver-specific options takes precedence. mem_encrypt=[X86-64] The kernel parameter for setting the AMD Secure Memory Encryption (SME) control. Valid arguments: on, off Default depends on the kernel configuration option: on (CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT=y) off (CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT=n) mem_encrypt=on: Activate SME mem_encrypt=off: Do not activate SME Refer to Documentation/virt/kvm/x86/amd-memory-encryption.rst for details on when memory encryption can be activated. retbleed=[X86] With this kernel parameter, you can control mitigation of the RETBleed (Arbitrary Speculative Code Execution with Return Instructions) vulnerability. AMD-based UNRET and IBPB mitigations alone do not stop sibling threads from influencing the predictions of other sibling threads. For that reason, STIBP is used on processors that support it, and SMT is mitigated on processors that do not. off - No mitigation. auto - Automatically select a mitigation. auto,nosmt - Automatically select a mitigation, disabling SMT if necessary for the full mitigation (only on Zen1 and older without STIBP). ibpb - On AMD, mitigate short speculation windows on basic block boundaries too. Safe, highest performance impact. It also enables STIBP if present. Not suitable on Intel. unret - Force enable untrained return thunks, only effective on AMD f15h-f17h based systems. unret,nosmt - Like unret, but will disable SMT when STIBP is not available. This is the alternative for systems that do not have STIBP. swiotlb=[ARM,IA-64,PPC,MIPS,X86] With this kernel parameter, you can configure the behavior of I/O TLB slabs. Format: { <int> [,<int>] | force | noforce } <int> - Number of I/O TLB slabs. <int> - Second integer after the comma. Number of swiotlb areas with their own lock. Must be a power of 2. 
force - Force the use of bounce buffers even if they would not be automatically used by the kernel. noforce - Never use bounce buffers (for debugging). New sysctl parameters page_lock_unfairness This value determines the number of times that the page lock can be stolen from under a waiter. After the lock is stolen the number of times specified in this file (the default is 5), fair lock handoff semantics apply, and the waiter is only awakened if the lock can be taken. rps_default_mask The default RPS CPU mask used on newly created network devices. An empty mask means that RPS is disabled by default.
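For illustration only, a minimal sketch of how such parameters are typically applied on a RHEL system; the values are arbitrary examples rather than recommendations, and the sysctl names vm.page_lock_unfairness and net.core.rps_default_mask are assumed to correspond to the entries described above:
# Persistently add a boot parameter (example value) to all installed kernel entries
grubby --update-kernel=ALL --args="crashkernel=256M"
# Inspect and adjust the new sysctl parameters at runtime (example values)
sysctl vm.page_lock_unfairness
sysctl -w vm.page_lock_unfairness=5
sysctl -w net.core.rps_default_mask=0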
null
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/8/html/8.8_release_notes/kernel_parameters_changes
Making open source more inclusive
Making open source more inclusive Red Hat is committed to replacing problematic language in our code, documentation, and web properties. We are beginning with these four terms: master, slave, blacklist, and whitelist. Because of the enormity of this endeavor, these changes will be implemented gradually over several upcoming releases. For more details, see our CTO Chris Wright's message .
null
https://docs.redhat.com/en/documentation/red_hat_build_of_quarkus/3.8/html/openid_connect_oidc_client_and_token_propagation/making-open-source-more-inclusive
14.4. Configuration examples
14.4. Configuration examples The following examples provide real-world demonstrations of how SELinux complements the Samba server and how the full functionality of the Samba server can be maintained. 14.4.1. Sharing directories you create The following example creates a new directory and shares that directory through Samba: Confirm that the samba, samba-common, and samba-client packages are installed: If any of these packages are not installed, install them by using the yum utility as root: Use the mkdir utility as root to create a new top-level directory to share files through Samba: Use the touch utility as root to create an empty file. This file is used later to verify that the Samba share mounted correctly: SELinux allows Samba to read and write to files labeled with the samba_share_t type, as long as the /etc/samba/smb.conf file and Linux permissions are set accordingly. Enter the following command as root to add the label change to the file-context configuration: Use the restorecon utility as root to apply the label changes: Edit /etc/samba/smb.conf as root. Add the following to the bottom of this file to share the /myshare/ directory through Samba: A Samba account is required to mount a Samba file system. Enter the following command as root to create a Samba account, where username is an existing Linux user. For example, smbpasswd -a testuser creates a Samba account for the Linux testuser user: If you enter the above command, specifying a user name of an account that does not exist on the system, it causes a Cannot locate Unix account for 'username'! error. Start the Samba service: Enter the following command to list the available shares, where username is the Samba account added in step 7. When prompted for a password, enter the password assigned to the Samba account in step 7 (version numbers may differ): Use the mkdir utility as root to create a new directory. This directory will be used to mount the myshare Samba share: Enter the following command as root to mount the myshare Samba share to /test/, replacing username with the user name from step 7: Enter the password for username, which was configured in step 7. Enter the following command to view the file1 file created in step 3: 14.4.2. Sharing a website It may not be possible to label files with the samba_share_t type, for example, when you want to share a website in the /var/www/html/ directory. For these cases, use the samba_export_all_ro Boolean to share any file or directory (regardless of the current label), allowing read-only permissions, or the samba_export_all_rw Boolean to share any file or directory (regardless of the current label), allowing read and write permissions. The following example creates a file for a website in /var/www/html/, and then shares that file through Samba, allowing read and write permissions. This example assumes the httpd, samba, samba-common, samba-client, and wget packages are installed: As the root user, create a /var/www/html/file1.html file. Copy and paste the following content into this file: Enter the following command to view the SELinux context of file1.html: The file is labeled with the httpd_sys_content_t type. By default, the Apache HTTP Server can access this type, but Samba cannot. Start the Apache HTTP Server: Change into a directory your user has write access to, and enter the following command. Unless there are changes to the default configuration, this command succeeds: Edit /etc/samba/smb.conf as root. 
Add the following to the bottom of this file to share the /var/www/html/ directory through Samba: The /var/www/html/ directory is labeled with the httpd_sys_content_t type. By default, Samba cannot access files and directories labeled with this type, even if Linux permissions allow it. To allow Samba access, enable the samba_export_all_ro Boolean: Do not use the -P option if you do not want the change to persist across reboots. Note that enabling the samba_export_all_ro Boolean allows Samba to access any type. Start the Samba service:
[ "~]USD rpm -q samba samba-common samba-client package samba is not installed package samba-common is not installed package samba-client is not installed", "~]# yum install package-name", "~]# mkdir /myshare", "~]# touch /myshare/file1", "~]# semanage fcontext -a -t samba_share_t \"/myshare(/.*)?\"", "~]# restorecon -R -v /myshare restorecon reset /myshare context unconfined_u:object_r:default_t:s0->system_u:object_r:samba_share_t:s0 restorecon reset /myshare/file1 context unconfined_u:object_r:default_t:s0->system_u:object_r:samba_share_t:s0", "[myshare] comment = My share path = /myshare public = yes writable = no", "~]# smbpasswd -a testuser New SMB password: Enter a password Retype new SMB password: Enter the same password again Added user testuser.", "~]# systemctl start smb.service", "~]USD smbclient -U username -L localhost Enter username 's password: Domain=[ HOSTNAME ] OS=[Unix] Server=[Samba 3.4.0-0.41.el6] Sharename Type Comment --------- ---- ------- myshare Disk My share IPCUSD IPC IPC Service (Samba Server Version 3.4.0-0.41.el6) username Disk Home Directories Domain=[ HOSTNAME ] OS=[Unix] Server=[Samba 3.4.0-0.41.el6] Server Comment --------- ------- Workgroup Master --------- -------", "~]# mkdir /test/", "~]# mount //localhost/myshare /test/ -o user= username", "~]USD ls /test/ file1", "<html> <h2>File being shared through the Apache HTTP Server and Samba.</h2> </html>", "~]USD ls -Z /var/www/html/file1.html -rw-r--r--. root root unconfined_u:object_r:httpd_sys_content_t:s0 /var/www/html/file1.html", "~]# systemctl start httpd.service", "~]USD wget http://localhost/file1.html Resolving localhost... 127.0.0.1 Connecting to localhost|127.0.0.1|:80... connected. HTTP request sent, awaiting response... 200 OK Length: 84 [text/html] Saving to: `file1.html.1' 100%[=======================>] 84 --.-K/s in 0s `file1.html.1' saved [84/84]", "[website] comment = Sharing a website path = /var/www/html/ public = no writable = no", "~]# setsebool -P samba_export_all_ro on", "~]# systemctl start smb.service" ]
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/7/html/selinux_users_and_administrators_guide/sect-managing_confined_services-samba-configuration_examples
Chapter 4. Deploying functions
Chapter 4. Deploying functions You can deploy your functions to the cluster by using the kn func tool. 4.1. Deploying a function You can deploy a function to your cluster as a Knative service by using the kn func deploy command. If the targeted function is already deployed, it is updated with a new container image that is pushed to a container image registry, and the Knative service is updated. Prerequisites The OpenShift Serverless Operator and Knative Serving are installed on the cluster. You have installed the Knative (kn) CLI. You have created a project or have access to a project with the appropriate roles and permissions to create applications and other workloads in OpenShift Container Platform. You must have already created and initialized the function that you want to deploy. Procedure Deploy a function: $ kn func deploy [-n <namespace> -p <path> -i <image>] Example output Function deployed at: http://func.example.com If no namespace is specified, the function is deployed in the current namespace. The function is deployed from the current directory, unless a path is specified. The Knative service name is derived from the project name and cannot be changed using this command. Note You can create a serverless function with a Git repository URL by using Import from Git or Create Serverless Function in the +Add view of the Developer perspective.
[ "kn func deploy [-n <namespace> -p <path> -i <image>]", "Function deployed at: http://func.example.com" ]
https://docs.redhat.com/en/documentation/red_hat_openshift_serverless/1.35/html/functions/serverless-functions-deploying
Chapter 3. Deprecated functionalities
Chapter 3. Deprecated functionalities This section lists deprecated functionalities in Red Hat Developer Hub 1.3. 3.1. spec.application.image, spec.application.replicas, and spec.application.imagePullSecrets fields are deprecated The spec.application.image, spec.application.replicas, and spec.application.imagePullSecrets fields are deprecated in v1alpha2 in favor of spec.deployment. Procedure To update your Developer Hub Operator configuration: Remove the spec.application.image, spec.application.replicas, and spec.application.imagePullSecrets fields from the Operator configuration. For example: spec: application: replicas: 2 # <1> imagePullSecrets: # <2> - my-secret-name image: quay.io/my/my-rhdh:latest # <3> <1> Replica count. <2> Array of image pull secret names. <3> Image name. Replace the removed fields with the new spec.deployment fields. For example: spec: deployment: patch: spec: replicas: 2 # <1> imagePullSecrets: # <2> - name: my-secret-name template: metadata: labels: my: true spec: containers: - name: backstage-backend image: quay.io/my/my-rhdh:latest # <3> <1> Replica count. <2> Array of image pull secret names. <3> Image name. Additional resources RHIDP-1138
[ "spec: application: replicas: 2 # &lt;1&gt; imagePullSecrets: # &lt;2&gt; - my-secret-name image: quay.io/my/my-rhdh:latest # &lt;3&gt;", "spec: deployment: patch: spec: replicas: 2 # &lt;1&gt; imagePullSecrets: # &lt;2&gt; - name: my-secret-name template: metadata: labels: my: true spec: containers: - name: backstage-backend image: quay.io/my/my-rhdh:latest # &lt;3&gt;" ]
https://docs.redhat.com/en/documentation/red_hat_developer_hub/1.3/html/release_notes/deprecated-functionalities
Chapter 4. Red Hat Virtualization 4.4 Batch Update 2 (ovirt-4.4.3)
Chapter 4. Red Hat Virtualization 4.4 Batch Update 2 (ovirt-4.4.3) 4.1. Red Hat Virtualization Manager 4.4 for RHEL 8 x86_64 (RPMs) The following table outlines the packages included in the Red Hat Virtualization Manager 4.4.3 image. Table 4.1. Red Hat Virtualization Manager 4.4 for RHEL 8 x86_64 (RPMs) Name Version GConf2 3.2.6-22.el8.x86_64 NetworkManager 1.26.0-9.el8_3.x86_64 NetworkManager-libnm 1.26.0-9.el8_3.x86_64 NetworkManager-team 1.26.0-9.el8_3.x86_64 NetworkManager-tui 1.26.0-9.el8_3.x86_64 PackageKit 1.1.12-6.el8.x86_64 PackageKit-glib 1.1.12-6.el8.x86_64 abattis-cantarell-fonts 0.0.25-4.el8.noarch acl 2.2.53-1.el8.x86_64 adobe-mappings-cmap 20171205-3.el8.noarch adobe-mappings-cmap-deprecated 20171205-3.el8.noarch adobe-mappings-pdf 20180407-1.el8.noarch aide 0.16-14.el8.x86_64 alsa-lib 1.2.3.2-1.el8.x86_64 ansible 2.9.14-1.el8ae.noarch ansible-runner-service 1.0.6-3.el8ev.noarch aopalliance 1.0-17.module+el8+2598+06babf2e.noarch apache-commons-codec 1.11-3.module+el8+2598+06babf2e.noarch apache-commons-collections 3.2.2-10.module+el8.1.0+3366+6dfb954c.noarch apache-commons-compress 1.18-1.el8ev.noarch apache-commons-configuration 1.10-1.el8ev.noarch apache-commons-io 2.6-3.module+el8+2598+06babf2e.noarch apache-commons-jxpath 1.3-29.el8ev.noarch apache-commons-lang 2.6-21.module+el8.1.0+3366+6dfb954c.noarch apache-commons-logging 1.2-13.module+el8+2598+06babf2e.noarch apache-sshd 2.5.1-1.el8ev.noarch apr 1.6.3-11.el8.x86_64 apr-util 1.6.1-6.el8.x86_64 asciidoc 8.6.10-0.5.20180627gitf7c2274.el8.noarch atk 2.28.1-1.el8.x86_64 audit 3.0-0.17.20191104git1c2f876.el8.x86_64 audit-libs 3.0-0.17.20191104git1c2f876.el8.x86_64 authselect 1.2.1-2.el8.x86_64 authselect-compat 1.2.1-2.el8.x86_64 authselect-libs 1.2.1-2.el8.x86_64 autogen-libopts 5.18.12-8.el8.x86_64 avahi-libs 0.7-19.el8.x86_64 basesystem 11-5.el8.noarch bash 4.4.19-12.el8.x86_64 bea-stax-api 1.2.0-16.module+el8.1.0+3366+6dfb954c.noarch bind-export-libs 9.11.20-5.el8.x86_64 bind-libs 9.11.20-5.el8.x86_64 bind-libs-lite 9.11.20-5.el8.x86_64 bind-license 9.11.20-5.el8.noarch bind-utils 9.11.20-5.el8.x86_64 binutils 2.30-79.el8.x86_64 biosdevname 0.7.3-2.el8.x86_64 boost-regex 1.66.0-10.el8.x86_64 brotli 1.0.6-2.el8.x86_64 bzip2 1.0.6-26.el8.x86_64 bzip2-libs 1.0.6-26.el8.x86_64 c-ares 1.13.0-5.el8.x86_64 ca-certificates 2020.2.41-80.0.el8_2.noarch cairo 1.15.12-3.el8.x86_64 cairo-gobject 1.15.12-3.el8.x86_64 checkpolicy 2.9-1.el8.x86_64 chkconfig 1.13-2.el8.x86_64 chrony 3.5-1.el8.x86_64 cjose 0.6.1-2.module+el8+2454+f890a43a.x86_64 cloud-init 19.4-11.el8.noarch cloud-utils-growpart 0.31-1.el8.noarch cockpit 224.2-1.el8.x86_64 cockpit-bridge 224.2-1.el8.x86_64 cockpit-dashboard 224.2-1.el8.noarch cockpit-packagekit 224.2-1.el8.noarch cockpit-system 224.2-1.el8.noarch cockpit-ws 224.2-1.el8.x86_64 collectd 5.11.0-2.el8ost.x86_64 collectd-disk 5.11.0-2.el8ost.x86_64 collectd-postgresql 5.11.0-2.el8ost.x86_64 collectd-write_http 5.11.0-2.el8ost.x86_64 collectd-write_syslog 5.11.0-2.el8ost.x86_64 copy-jdk-configs 3.7-4.el8.noarch coreutils 8.30-8.el8.x86_64 coreutils-common 8.30-8.el8.x86_64 cpio 2.12-8.el8.x86_64 cracklib 2.9.6-15.el8.x86_64 cracklib-dicts 2.9.6-15.el8.x86_64 cronie 1.5.2-4.el8.x86_64 cronie-anacron 1.5.2-4.el8.x86_64 crontabs 1.11-16.20150630git.el8.noarch crypto-policies 20200713-1.git51d1222.el8.noarch crypto-policies-scripts 20200713-1.git51d1222.el8.noarch cryptsetup-libs 2.3.3-2.el8.x86_64 ctags 5.8-22.el8.x86_64 cups-libs 2.2.6-38.el8.x86_64 curl 7.61.1-14.el8.x86_64 cyrus-sasl-lib 2.1.27-5.el8.x86_64 
dbus 1.12.8-11.el8.x86_64 dbus-common 1.12.8-11.el8.noarch dbus-daemon 1.12.8-11.el8.x86_64 dbus-glib 0.110-2.el8.x86_64 dbus-libs 1.12.8-11.el8.x86_64 dbus-tools 1.12.8-11.el8.x86_64 dejavu-fonts-common 2.35-6.el8.noarch dejavu-sans-mono-fonts 2.35-6.el8.noarch device-mapper 1.02.171-5.el8.x86_64 device-mapper-event 1.02.171-5.el8.x86_64 device-mapper-event-libs 1.02.171-5.el8.x86_64 device-mapper-libs 1.02.171-5.el8.x86_64 device-mapper-persistent-data 0.8.5-4.el8.x86_64 dhcp-client 4.3.6-41.el8.x86_64 dhcp-common 4.3.6-41.el8.noarch dhcp-libs 4.3.6-41.el8.x86_64 diffutils 3.6-6.el8.x86_64 dmidecode 3.2-6.el8.x86_64 dnf 4.2.23-4.el8.noarch dnf-data 4.2.23-4.el8.noarch dnf-plugin-subscription-manager 1.27.16-1.el8.x86_64 dnf-plugins-core 4.0.17-5.el8.noarch docbook-dtds 1.0-69.el8.noarch docbook-style-xsl 1.79.2-7.el8.noarch dracut 049-95.git20200804.el8.x86_64 dracut-config-generic 049-95.git20200804.el8.x86_64 dracut-config-rescue 049-95.git20200804.el8.x86_64 dracut-network 049-95.git20200804.el8.x86_64 dracut-squash 049-95.git20200804.el8.x86_64 dwz 0.12-9.el8.x86_64 e2fsprogs 1.45.6-1.el8.x86_64 e2fsprogs-libs 1.45.6-1.el8.x86_64 eap7-FastInfoset 1.2.13-10.redhat_1.1.el8eap.noarch eap7-activemq-artemis-cli 2.9.0-5.redhat_00011.1.el8eap.noarch eap7-activemq-artemis-commons 2.9.0-5.redhat_00011.1.el8eap.noarch eap7-activemq-artemis-core-client 2.9.0-5.redhat_00011.1.el8eap.noarch eap7-activemq-artemis-dto 2.9.0-5.redhat_00011.1.el8eap.noarch eap7-activemq-artemis-hornetq-protocol 2.9.0-5.redhat_00011.1.el8eap.noarch eap7-activemq-artemis-hqclient-protocol 2.9.0-5.redhat_00011.1.el8eap.noarch eap7-activemq-artemis-jdbc-store 2.9.0-5.redhat_00011.1.el8eap.noarch eap7-activemq-artemis-jms-client 2.9.0-5.redhat_00011.1.el8eap.noarch eap7-activemq-artemis-jms-server 2.9.0-5.redhat_00011.1.el8eap.noarch eap7-activemq-artemis-journal 2.9.0-5.redhat_00011.1.el8eap.noarch eap7-activemq-artemis-native 1.0.2-1.redhat_00001.1.el8eap.noarch eap7-activemq-artemis-ra 2.9.0-5.redhat_00011.1.el8eap.noarch eap7-activemq-artemis-selector 2.9.0-5.redhat_00011.1.el8eap.noarch eap7-activemq-artemis-server 2.9.0-5.redhat_00011.1.el8eap.noarch eap7-activemq-artemis-service-extensions 2.9.0-5.redhat_00011.1.el8eap.noarch eap7-activemq-artemis-tools 2.9.0-5.redhat_00011.1.el8eap.noarch eap7-aesh-extensions 1.8.0-1.redhat_00001.1.el8eap.noarch eap7-aesh-readline 2.0.0-1.redhat_00001.1.el8eap.noarch eap7-agroal-api 1.3.0-1.redhat_00001.1.el8eap.noarch eap7-agroal-narayana 1.3.0-1.redhat_00001.1.el8eap.noarch eap7-agroal-pool 1.3.0-1.redhat_00001.1.el8eap.noarch eap7-antlr 2.7.7-54.redhat_7.1.el8eap.noarch eap7-apache-commons-beanutils 1.9.4-1.redhat_00002.1.el8eap.noarch eap7-apache-commons-cli 1.3.1-3.redhat_2.1.el8eap.noarch eap7-apache-commons-codec 1.14.0-1.redhat_00001.1.el8eap.noarch eap7-apache-commons-collections 3.2.2-9.redhat_2.1.el8eap.noarch eap7-apache-commons-io 2.5.0-4.redhat_3.1.el8eap.noarch eap7-apache-commons-lang 3.10.0-1.redhat_00001.1.el8eap.noarch eap7-apache-commons-lang2 2.6.0-1.redhat_7.1.el8eap.noarch eap7-apache-cxf 3.3.7-1.redhat_00001.1.el8eap.noarch eap7-apache-cxf-rt 3.3.7-1.redhat_00001.1.el8eap.noarch eap7-apache-cxf-services 3.3.7-1.redhat_00001.1.el8eap.noarch eap7-apache-cxf-tools 3.3.7-1.redhat_00001.1.el8eap.noarch eap7-apache-mime4j 0.6.0-4.redhat_7.1.el8eap.noarch eap7-artemis-wildfly-integration 1.0.2-4.redhat_1.1.el8eap.noarch eap7-atinject 1.0.0-4.redhat_00002.1.el8eap.noarch eap7-avro 1.7.6-7.redhat_2.1.el8eap.noarch eap7-azure-storage 6.1.0-1.redhat_1.1.el8eap.noarch 
eap7-bouncycastle-mail 1.65.0-1.redhat_00001.1.el8eap.noarch eap7-bouncycastle-pkix 1.65.0-1.redhat_00001.1.el8eap.noarch eap7-bouncycastle-prov 1.65.0-1.redhat_00001.1.el8eap.noarch eap7-byte-buddy 1.9.11-1.redhat_00002.1.el8eap.noarch eap7-caffeine 2.6.2-3.redhat_1.1.el8eap.noarch eap7-cal10n 0.8.1-6.redhat_1.1.el8eap.noarch eap7-codehaus-jackson-core-asl 1.9.13-10.redhat_00007.1.el8eap.noarch eap7-codehaus-jackson-jaxrs 1.9.13-10.redhat_00007.1.el8eap.noarch eap7-codehaus-jackson-mapper-asl 1.9.13-10.redhat_00007.1.el8eap.noarch eap7-codehaus-jackson-xc 1.9.13-10.redhat_00007.1.el8eap.noarch eap7-codemodel 2.3.3-4.b02_redhat_00001.1.el8eap.noarch eap7-commons-logging-jboss-logging 1.0.0-1.Final_redhat_1.1.el8eap.noarch eap7-cryptacular 1.2.4-1.redhat_00001.1.el8eap.noarch eap7-cxf-xjc-boolean 3.3.0-1.redhat_00001.1.el8eap.noarch eap7-cxf-xjc-bug986 3.3.0-1.redhat_00001.1.el8eap.noarch eap7-cxf-xjc-dv 3.3.0-1.redhat_00001.1.el8eap.noarch eap7-cxf-xjc-runtime 3.3.0-1.redhat_00001.1.el8eap.noarch eap7-cxf-xjc-ts 3.3.0-1.redhat_00001.1.el8eap.noarch eap7-dom4j 2.1.3-1.redhat_00001.1.el8eap.noarch eap7-ecj 4.6.1-3.redhat_1.1.el8eap.noarch eap7-eclipse-jgit 5.0.2.201807311906-2.r_redhat_00001.1.el8eap.noarch eap7-glassfish-concurrent 1.0.0-4.redhat_1.1.el8eap.noarch eap7-glassfish-jaf 1.2.1-1.redhat_00002.1.el8eap.noarch eap7-glassfish-javamail 1.6.4-2.redhat_00001.1.el8eap.noarch eap7-glassfish-jsf 2.3.9-11.SP12_redhat_00001.1.el8eap.noarch eap7-glassfish-json 1.1.6-2.redhat_00001.1.el8eap.noarch eap7-gnu-getopt 1.0.13-6.redhat_5.1.el8eap.noarch eap7-gson 2.8.2-1.redhat_5.1.el8eap.noarch eap7-guava 25.0.0-2.redhat_1.1.el8eap.noarch eap7-h2database 1.4.193-6.redhat_2.1.el8eap.noarch eap7-hal-console 3.2.10-1.Final_redhat_00001.1.el8eap.noarch eap7-hibernate-beanvalidation-api 2.0.2-1.redhat_00001.1.el8eap.noarch eap7-hibernate-commons-annotations 5.0.5-1.Final_redhat_00002.1.el8eap.noarch eap7-hibernate-core 5.3.18-1.Final_redhat_00001.1.el8eap.noarch eap7-hibernate-entitymanager 5.3.18-1.Final_redhat_00001.1.el8eap.noarch eap7-hibernate-envers 5.3.18-1.Final_redhat_00001.1.el8eap.noarch eap7-hibernate-search-backend-jms 5.10.7-1.Final_redhat_00001.1.el8eap.noarch eap7-hibernate-search-engine 5.10.7-1.Final_redhat_00001.1.el8eap.noarch eap7-hibernate-search-orm 5.10.7-1.Final_redhat_00001.1.el8eap.noarch eap7-hibernate-search-serialization-avro 5.10.7-1.Final_redhat_00001.1.el8eap.noarch eap7-hibernate-validator 6.0.20-1.Final_redhat_00001.1.el8eap.noarch eap7-hibernate-validator-cdi 6.0.20-1.Final_redhat_00001.1.el8eap.noarch eap7-hornetq-commons 2.4.7-7.Final_redhat_2.1.el8eap.noarch eap7-hornetq-core-client 2.4.7-7.Final_redhat_2.1.el8eap.noarch eap7-hornetq-jms-client 2.4.7-7.Final_redhat_2.1.el8eap.noarch eap7-httpcomponents-asyncclient 4.1.4-1.redhat_00001.1.el8eap.noarch eap7-httpcomponents-client 4.5.12-1.redhat_00001.1.el8eap.noarch eap7-httpcomponents-core 4.4.13-1.redhat_00001.1.el8eap.noarch eap7-infinispan-cachestore-jdbc 9.4.19-1.Final_redhat_00001.1.el8eap.noarch eap7-infinispan-cachestore-remote 9.4.19-1.Final_redhat_00001.1.el8eap.noarch eap7-infinispan-client-hotrod 9.4.19-1.Final_redhat_00001.1.el8eap.noarch eap7-infinispan-commons 9.4.19-1.Final_redhat_00001.1.el8eap.noarch eap7-infinispan-core 9.4.19-1.Final_redhat_00001.1.el8eap.noarch eap7-infinispan-hibernate-cache-commons 9.4.19-1.Final_redhat_00001.1.el8eap.noarch eap7-infinispan-hibernate-cache-spi 9.4.19-1.Final_redhat_00001.1.el8eap.noarch eap7-infinispan-hibernate-cache-v53 
9.4.19-1.Final_redhat_00001.1.el8eap.noarch eap7-ironjacamar-common-api 1.4.22-1.Final_redhat_00001.1.el8eap.noarch eap7-ironjacamar-common-impl 1.4.22-1.Final_redhat_00001.1.el8eap.noarch eap7-ironjacamar-common-spi 1.4.22-1.Final_redhat_00001.1.el8eap.noarch eap7-ironjacamar-core-api 1.4.22-1.Final_redhat_00001.1.el8eap.noarch eap7-ironjacamar-core-impl 1.4.22-1.Final_redhat_00001.1.el8eap.noarch eap7-ironjacamar-deployers-common 1.4.22-1.Final_redhat_00001.1.el8eap.noarch eap7-ironjacamar-jdbc 1.4.22-1.Final_redhat_00001.1.el8eap.noarch eap7-ironjacamar-validator 1.4.22-1.Final_redhat_00001.1.el8eap.noarch eap7-istack-commons-runtime 3.0.10-1.redhat_00001.1.el8eap.noarch eap7-istack-commons-tools 3.0.10-1.redhat_00001.1.el8eap.noarch eap7-jackson-annotations 2.10.4-1.redhat_00001.1.el8eap.noarch eap7-jackson-core 2.10.4-1.redhat_00001.1.el8eap.noarch eap7-jackson-coreutils 1.0.0-1.redhat_1.1.el8eap.noarch eap7-jackson-databind 2.10.4-1.redhat_00002.1.el8eap.noarch eap7-jackson-datatype-jdk8 2.10.4-1.redhat_00001.1.el8eap.noarch eap7-jackson-datatype-jsr310 2.10.4-1.redhat_00001.1.el8eap.noarch eap7-jackson-jaxrs-base 2.10.4-1.redhat_00001.1.el8eap.noarch eap7-jackson-jaxrs-json-provider 2.10.4-1.redhat_00001.1.el8eap.noarch eap7-jackson-module-jaxb-annotations 2.10.4-1.redhat_00001.1.el8eap.noarch eap7-jaegertracing-jaeger-client-java-core 0.34.3-1.redhat_00001.1.el8eap.noarch eap7-jaegertracing-jaeger-client-java-thrift 0.34.3-1.redhat_00001.1.el8eap.noarch eap7-jakarta-el 3.0.3-1.redhat_00002.1.el8eap.noarch eap7-jakarta-security-enterprise-api 1.0.2-3.redhat_00001.1.el8eap.noarch eap7-jandex 2.1.2-1.Final_redhat_00001.1.el8eap.noarch eap7-jansi 1.18.0-1.redhat_00001.1.el8eap.noarch eap7-jasypt 1.9.3-1.redhat_00001.1.el8eap.noarch eap7-java-classmate 1.3.4-1.redhat_1.1.el8eap.noarch eap7-javaee-jpa-spec 2.2.3-1.redhat_00001.1.el8eap.noarch eap7-javaee-security-api 1.0.0-2.redhat_1.1.el8eap.noarch eap7-javaee-security-soteria-enterprise 1.0.1-3.redhat_00002.1.el8eap.noarch eap7-javaewah 1.1.6-1.redhat_00001.1.el8eap.noarch eap7-javapackages-tools 3.4.1-5.15.6.el8eap.noarch eap7-javassist 3.23.2-2.GA_redhat_00001.1.el8eap.noarch eap7-jaxb-jxc 2.3.3-4.b02_redhat_00001.1.el8eap.noarch eap7-jaxb-runtime 2.3.3-4.b02_redhat_00001.1.el8eap.noarch eap7-jaxb-xjc 2.3.3-4.b02_redhat_00001.1.el8eap.noarch eap7-jaxbintros 1.0.3-1.GA_redhat_00001.1.el8eap.noarch eap7-jaxen 1.1.6-14.redhat_2.1.el8eap.noarch eap7-jberet-core 1.3.7-1.Final_redhat_00001.1.el8eap.noarch eap7-jboss-aesh 2.4.0-1.redhat_00001.1.el8eap.noarch eap7-jboss-annotations-api_1.3_spec 2.0.1-2.Final_redhat_00001.1.el8eap.noarch eap7-jboss-batch-api_1.0_spec 2.0.0-1.Final_redhat_00001.1.el8eap.noarch eap7-jboss-classfilewriter 1.2.4-1.Final_redhat_00001.1.el8eap.noarch eap7-jboss-common-beans 2.0.1-1.Final_redhat_00001.1.el8eap.noarch eap7-jboss-concurrency-api_1.0_spec 2.0.0-1.Final_redhat_00001.1.el8eap.noarch eap7-jboss-connector-api_1.7_spec 2.0.0-2.Final_redhat_00001.1.el8eap.noarch eap7-jboss-dmr 1.5.0-2.Final_redhat_1.1.el8eap.noarch eap7-jboss-ejb-api_3.2_spec 2.0.0-1.Final_redhat_00001.1.el8eap.noarch eap7-jboss-ejb-client 4.0.33-2.SP1_redhat_00001.1.el8eap.noarch eap7-jboss-ejb3-ext-api 2.3.0-1.Final_redhat_00001.1.el8eap.noarch eap7-jboss-el-api_3.0_spec 2.0.0-2.Final_redhat_00001.1.el8eap.noarch eap7-jboss-genericjms 2.0.6-1.Final_redhat_00001.1.el8eap.noarch eap7-jboss-iiop-client 1.0.1-3.Final_redhat_1.1.el8eap.noarch eap7-jboss-interceptors-api_1.2_spec 2.0.0-3.Final_redhat_00002.1.el8eap.noarch eap7-jboss-invocation 
1.5.3-1.Final_redhat_00001.1.el8eap.noarch eap7-jboss-j2eemgmt-api_1.1_spec 2.0.0-2.Final_redhat_00001.1.el8eap.noarch eap7-jboss-jacc-api_1.5_spec 2.0.0-2.Final_redhat_00001.1.el8eap.noarch eap7-jboss-jaspi-api_1.1_spec 2.0.1-2.Final_redhat_00001.1.el8eap.noarch eap7-jboss-jaxb-api_2.3_spec 1.0.1-1.Final_redhat_1.1.el8eap.noarch eap7-jboss-jaxrpc-api_1.1_spec 2.0.0-1.Final_redhat_00001.1.el8eap.noarch eap7-jboss-jaxrs-api_2.1_spec 2.0.1-1.Final_redhat_00001.1.el8eap.noarch eap7-jboss-jaxws-api_2.3_spec 1.0.0-1.Final_redhat_1.1.el8eap.noarch eap7-jboss-jms-api_2.0_spec 2.0.0-1.Final_redhat_00001.1.el8eap.noarch eap7-jboss-jsf-api_2.3_spec 3.0.0-4.SP04_redhat_00001.1.el8eap.noarch eap7-jboss-jsp-api_2.3_spec 2.0.0-1.Final_redhat_00001.1.el8eap.noarch eap7-jboss-logging 3.4.1-2.Final_redhat_00001.1.el8eap.noarch eap7-jboss-logmanager 2.1.17-1.Final_redhat_00001.1.el8eap.noarch eap7-jboss-marshalling 2.0.9-1.Final_redhat_00001.1.el8eap.noarch eap7-jboss-marshalling-river 2.0.9-1.Final_redhat_00001.1.el8eap.noarch eap7-jboss-metadata-appclient 13.0.0-1.Final_redhat_00001.1.el8eap.noarch eap7-jboss-metadata-common 13.0.0-1.Final_redhat_00001.1.el8eap.noarch eap7-jboss-metadata-ear 13.0.0-1.Final_redhat_00001.1.el8eap.noarch eap7-jboss-metadata-ejb 13.0.0-1.Final_redhat_00001.1.el8eap.noarch eap7-jboss-metadata-web 13.0.0-1.Final_redhat_00001.1.el8eap.noarch eap7-jboss-modules 1.10.0-1.Final_redhat_00001.1.el8eap.noarch eap7-jboss-msc 1.4.11-1.Final_redhat_00001.1.el8eap.noarch eap7-jboss-openjdk-orb 8.1.4-3.Final_redhat_00002.1.el8eap.noarch eap7-jboss-remoting 5.0.18-1.Final_redhat_00001.1.el8eap.noarch eap7-jboss-remoting-jmx 3.0.4-1.Final_redhat_00001.1.el8eap.noarch eap7-jboss-saaj-api_1.3_spec 1.0.6-1.Final_redhat_1.1.el8eap.noarch eap7-jboss-saaj-api_1.4_spec 1.0.1-1.Final_redhat_00001.1.el8eap.noarch eap7-jboss-seam-int 7.0.0-6.GA_redhat_2.1.el8eap.noarch eap7-jboss-security-negotiation 3.0.6-1.Final_redhat_00001.1.el8eap.noarch eap7-jboss-security-xacml 2.0.8-17.Final_redhat_8.1.el8eap.noarch eap7-jboss-server-migration 1.7.2-2.Final_redhat_00002.1.el8eap.noarch eap7-jboss-server-migration-cli 1.7.2-2.Final_redhat_00002.1.el8eap.noarch eap7-jboss-server-migration-core 1.7.2-2.Final_redhat_00002.1.el8eap.noarch eap7-jboss-server-migration-eap6.4 1.7.2-2.Final_redhat_00002.1.el8eap.noarch eap7-jboss-server-migration-eap6.4-to-eap7.3 1.7.2-2.Final_redhat_00002.1.el8eap.noarch eap7-jboss-server-migration-eap7.0 1.7.2-2.Final_redhat_00002.1.el8eap.noarch eap7-jboss-server-migration-eap7.1 1.7.2-2.Final_redhat_00002.1.el8eap.noarch eap7-jboss-server-migration-eap7.2 1.7.2-2.Final_redhat_00002.1.el8eap.noarch eap7-jboss-server-migration-eap7.2-to-eap7.3 1.7.2-2.Final_redhat_00002.1.el8eap.noarch eap7-jboss-server-migration-eap7.3-server 1.7.2-2.Final_redhat_00002.1.el8eap.noarch eap7-jboss-server-migration-wildfly10.0 1.7.2-2.Final_redhat_00002.1.el8eap.noarch eap7-jboss-server-migration-wildfly10.1 1.7.2-2.Final_redhat_00002.1.el8eap.noarch eap7-jboss-server-migration-wildfly11.0 1.7.2-2.Final_redhat_00002.1.el8eap.noarch eap7-jboss-server-migration-wildfly12.0 1.7.2-2.Final_redhat_00002.1.el8eap.noarch eap7-jboss-server-migration-wildfly13.0-server 1.7.2-2.Final_redhat_00002.1.el8eap.noarch eap7-jboss-server-migration-wildfly14.0-server 1.7.2-2.Final_redhat_00002.1.el8eap.noarch eap7-jboss-server-migration-wildfly15.0-server 1.7.2-2.Final_redhat_00002.1.el8eap.noarch eap7-jboss-server-migration-wildfly16.0-server 1.7.2-2.Final_redhat_00002.1.el8eap.noarch 
eap7-jboss-server-migration-wildfly17.0-server 1.7.2-2.Final_redhat_00002.1.el8eap.noarch eap7-jboss-server-migration-wildfly18.0-server 1.7.2-2.Final_redhat_00002.1.el8eap.noarch eap7-jboss-server-migration-wildfly8.2 1.7.2-2.Final_redhat_00002.1.el8eap.noarch eap7-jboss-server-migration-wildfly9.0 1.7.2-2.Final_redhat_00002.1.el8eap.noarch eap7-jboss-servlet-api_4.0_spec 2.0.0-2.Final_redhat_00001.1.el8eap.noarch eap7-jboss-stdio 1.1.0-1.Final_redhat_00001.1.el8eap.noarch eap7-jboss-threads 2.3.3-1.Final_redhat_00001.1.el8eap.noarch eap7-jboss-transaction-api_1.3_spec 2.0.0-3.Final_redhat_00002.1.el8eap.noarch eap7-jboss-transaction-spi 7.6.0-2.Final_redhat_1.1.el8eap.noarch eap7-jboss-vfs 3.2.15-1.Final_redhat_00001.1.el8eap.noarch eap7-jboss-websocket-api_1.1_spec 2.0.0-1.Final_redhat_00001.1.el8eap.noarch eap7-jboss-weld-3.1-api-weld-api 3.1.0-6.SP2_redhat_00001.1.el8eap.noarch eap7-jboss-weld-3.1-api-weld-spi 3.1.0-6.SP2_redhat_00001.1.el8eap.noarch eap7-jboss-xnio-base 3.7.9-1.Final_redhat_00001.1.el8eap.noarch eap7-jbossws-api 1.1.2-1.Final_redhat_00001.1.el8eap.noarch eap7-jbossws-common 3.2.3-1.Final_redhat_00001.1.el8eap.noarch eap7-jbossws-common-tools 1.3.2-1.Final_redhat_00001.1.el8eap.noarch eap7-jbossws-cxf 5.3.0-1.Final_redhat_00001.1.el8eap.noarch eap7-jbossws-jaxws-undertow-httpspi 1.0.1-3.Final_redhat_1.1.el8eap.noarch eap7-jbossws-spi 3.2.3-1.Final_redhat_00001.1.el8eap.noarch eap7-jcip-annotations 1.0.0-5.redhat_8.1.el8eap.noarch eap7-jettison 1.4.0-1.redhat_00001.1.el8eap.noarch eap7-jgroups 4.1.10-1.Final_redhat_00001.1.el8eap.noarch eap7-jgroups-azure 1.2.1-1.Final_redhat_00001.1.el8eap.noarch eap7-jgroups-kubernetes 1.0.13-1.Final_redhat_00001.1.el8eap.noarch eap7-joda-time 2.9.7-2.redhat_1.1.el8eap.noarch eap7-jsch 0.1.54-7.redhat_00001.1.el8eap.noarch eap7-json-patch 1.9.0-1.redhat_00002.1.el8eap.noarch eap7-jsonb-spec 1.0.2-1.redhat_00001.1.el8eap.noarch eap7-jsoup 1.8.3-4.redhat_2.1.el8eap.noarch eap7-jul-to-slf4j-stub 1.0.1-7.Final_redhat_3.1.el8eap.noarch eap7-jzlib 1.1.1-7.redhat_00001.1.el8eap.noarch eap7-log4j-jboss-logmanager 1.2.0-1.Final_redhat_00001.1.el8eap.noarch eap7-lucene-analyzers-common 5.5.5-3.redhat_2.1.el8eap.noarch eap7-lucene-backward-codecs 5.5.5-3.redhat_2.1.el8eap.noarch eap7-lucene-core 5.5.5-3.redhat_2.1.el8eap.noarch eap7-lucene-facet 5.5.5-3.redhat_2.1.el8eap.noarch eap7-lucene-misc 5.5.5-3.redhat_2.1.el8eap.noarch eap7-lucene-queries 5.5.5-3.redhat_2.1.el8eap.noarch eap7-lucene-queryparser 5.5.5-3.redhat_2.1.el8eap.noarch eap7-microprofile-config-api 1.4.0-1.redhat_00003.1.el8eap.noarch eap7-microprofile-health 2.2.0-1.redhat_00001.1.el8eap.noarch eap7-microprofile-metrics-api 2.3.0-1.redhat_00001.1.el8eap.noarch eap7-microprofile-opentracing-api 1.3.3-1.redhat_00001.1.el8eap.noarch eap7-microprofile-rest-client-api 1.4.0-1.redhat_00004.1.el8eap.noarch eap7-mod_cluster 1.4.1-1.Final_redhat_00001.1.el8eap.noarch eap7-mustache-java-compiler 0.9.4-2.redhat_1.1.el8eap.noarch eap7-narayana-compensations 5.9.9-1.Final_redhat_00001.1.el8eap.noarch eap7-narayana-jbosstxbridge 5.9.9-1.Final_redhat_00001.1.el8eap.noarch eap7-narayana-jbossxts 5.9.9-1.Final_redhat_00001.1.el8eap.noarch eap7-narayana-jts-idlj 5.9.9-1.Final_redhat_00001.1.el8eap.noarch eap7-narayana-jts-integration 5.9.9-1.Final_redhat_00001.1.el8eap.noarch eap7-narayana-restat-api 5.9.9-1.Final_redhat_00001.1.el8eap.noarch eap7-narayana-restat-bridge 5.9.9-1.Final_redhat_00001.1.el8eap.noarch eap7-narayana-restat-integration 5.9.9-1.Final_redhat_00001.1.el8eap.noarch 
eap7-narayana-restat-util 5.9.9-1.Final_redhat_00001.1.el8eap.noarch eap7-narayana-txframework 5.9.9-1.Final_redhat_00001.1.el8eap.noarch eap7-neethi 3.1.1-1.redhat_1.1.el8eap.noarch eap7-netty-all 4.1.48-1.Final_redhat_00001.1.el8eap.noarch eap7-netty-xnio-transport 0.1.6-1.Final_redhat_00001.1.el8eap.noarch eap7-objectweb-asm 7.1.0-1.redhat_00001.1.el8eap.noarch eap7-okhttp 3.9.0-3.redhat_3.1.el8eap.noarch eap7-okio 1.13.0-2.redhat_3.1.el8eap.noarch eap7-opensaml-core 3.3.1-1.redhat_00002.1.el8eap.noarch eap7-opensaml-profile-api 3.3.1-1.redhat_00002.1.el8eap.noarch eap7-opensaml-saml-api 3.3.1-1.redhat_00002.1.el8eap.noarch eap7-opensaml-saml-impl 3.3.1-1.redhat_00002.1.el8eap.noarch eap7-opensaml-security-api 3.3.1-1.redhat_00002.1.el8eap.noarch eap7-opensaml-security-impl 3.3.1-1.redhat_00002.1.el8eap.noarch eap7-opensaml-soap-api 3.3.1-1.redhat_00002.1.el8eap.noarch eap7-opensaml-xacml-api 3.3.1-1.redhat_00002.1.el8eap.noarch eap7-opensaml-xacml-impl 3.3.1-1.redhat_00002.1.el8eap.noarch eap7-opensaml-xacml-saml-api 3.3.1-1.redhat_00002.1.el8eap.noarch eap7-opensaml-xacml-saml-impl 3.3.1-1.redhat_00002.1.el8eap.noarch eap7-opensaml-xmlsec-api 3.3.1-1.redhat_00002.1.el8eap.noarch eap7-opensaml-xmlsec-impl 3.3.1-1.redhat_00002.1.el8eap.noarch eap7-opentracing-contrib-java-concurrent 0.2.1-1.redhat_00001.1.el8eap.noarch eap7-opentracing-contrib-java-jaxrs 0.4.1-1.redhat_00006.1.el8eap.noarch eap7-opentracing-contrib-java-tracerresolver 0.1.5-1.redhat_00001.1.el8eap.noarch eap7-opentracing-contrib-java-web-servlet-filter 0.2.3-1.redhat_00001.1.el8eap.noarch eap7-opentracing-interceptors 0.0.4-1.redhat_00004.1.el8eap.noarch eap7-opentracing-java-api 0.31.0-1.redhat_00008.1.el8eap.noarch eap7-opentracing-java-noop 0.31.0-1.redhat_00008.1.el8eap.noarch eap7-opentracing-java-util 0.31.0-1.redhat_00008.1.el8eap.noarch eap7-picketbox 5.0.3-8.Final_redhat_00007.1.el8eap.noarch eap7-picketbox-commons 1.0.0-4.final_redhat_5.1.el8eap.noarch eap7-picketbox-infinispan 5.0.3-8.Final_redhat_00007.1.el8eap.noarch eap7-picketlink-api 2.5.5-20.SP12_redhat_00009.1.el8eap.noarch eap7-picketlink-common 2.5.5-20.SP12_redhat_00009.1.el8eap.noarch eap7-picketlink-config 2.5.5-20.SP12_redhat_00009.1.el8eap.noarch eap7-picketlink-federation 2.5.5-20.SP12_redhat_00009.1.el8eap.noarch eap7-picketlink-idm-api 2.5.5-20.SP12_redhat_00009.1.el8eap.noarch eap7-picketlink-idm-impl 2.5.5-20.SP12_redhat_00009.1.el8eap.noarch eap7-picketlink-idm-simple-schema 2.5.5-20.SP12_redhat_00009.1.el8eap.noarch eap7-picketlink-impl 2.5.5-20.SP12_redhat_00009.1.el8eap.noarch eap7-picketlink-wildfly8 2.5.5-25.SP12_redhat_00013.1.el8eap.noarch eap7-python3-javapackages 3.4.1-5.15.6.el8eap.noarch eap7-reactive-streams 1.0.2-2.redhat_1.1.el8eap.noarch eap7-reactivex-rxjava 2.2.5-1.redhat_00001.1.el8eap.noarch eap7-relaxng-datatype 2.3.3-4.b02_redhat_00001.1.el8eap.noarch eap7-resteasy-atom-provider 3.11.2-3.Final_redhat_00002.1.el8eap.noarch eap7-resteasy-cdi 3.11.2-3.Final_redhat_00002.1.el8eap.noarch eap7-resteasy-client 3.11.2-3.Final_redhat_00002.1.el8eap.noarch eap7-resteasy-client-microprofile 3.11.2-3.Final_redhat_00002.1.el8eap.noarch eap7-resteasy-crypto 3.11.2-3.Final_redhat_00002.1.el8eap.noarch eap7-resteasy-jackson-provider 3.11.2-3.Final_redhat_00002.1.el8eap.noarch eap7-resteasy-jackson2-provider 3.11.2-3.Final_redhat_00002.1.el8eap.noarch eap7-resteasy-jaxb-provider 3.11.2-3.Final_redhat_00002.1.el8eap.noarch eap7-resteasy-jaxrs 3.11.2-3.Final_redhat_00002.1.el8eap.noarch eap7-resteasy-jettison-provider 
3.11.2-3.Final_redhat_00002.1.el8eap.noarch eap7-resteasy-jose-jwt 3.11.2-3.Final_redhat_00002.1.el8eap.noarch eap7-resteasy-jsapi 3.11.2-3.Final_redhat_00002.1.el8eap.noarch eap7-resteasy-json-binding-provider 3.11.2-3.Final_redhat_00002.1.el8eap.noarch eap7-resteasy-json-p-provider 3.11.2-3.Final_redhat_00002.1.el8eap.noarch eap7-resteasy-multipart-provider 3.11.2-3.Final_redhat_00002.1.el8eap.noarch eap7-resteasy-rxjava2 3.11.2-3.Final_redhat_00002.1.el8eap.noarch eap7-resteasy-spring 3.11.2-3.Final_redhat_00002.1.el8eap.noarch eap7-resteasy-validator-provider-11 3.11.2-3.Final_redhat_00002.1.el8eap.noarch eap7-resteasy-yaml-provider 3.11.2-3.Final_redhat_00002.1.el8eap.noarch eap7-rngom 2.3.3-4.b02_redhat_00001.1.el8eap.noarch eap7-runtime 1-16.el8eap.x86_64 eap7-shibboleth-java-support 7.3.0-1.redhat_00001.1.el8eap.noarch eap7-slf4j-api 1.7.22-4.redhat_2.1.el8eap.noarch eap7-slf4j-ext 1.7.22-4.redhat_2.1.el8eap.noarch eap7-slf4j-jboss-logmanager 1.0.4-1.GA_redhat_00001.1.el8eap.noarch eap7-smallrye-config 1.6.2-3.redhat_00004.1.el8eap.noarch eap7-smallrye-health 2.2.0-1.redhat_00004.1.el8eap.noarch eap7-smallrye-metrics 2.4.0-1.redhat_00004.1.el8eap.noarch eap7-smallrye-opentracing 1.3.4-1.redhat_00004.1.el8eap.noarch eap7-snakeyaml 1.26.0-1.redhat_00001.1.el8eap.noarch eap7-stax-ex 1.7.8-1.redhat_00001.1.el8eap.noarch eap7-stax2-api 4.2.0-1.redhat_00001.1.el8eap.noarch eap7-staxmapper 1.3.0-2.Final_redhat_1.1.el8eap.noarch eap7-sun-saaj-1.3-impl 1.3.16-18.SP1_redhat_6.1.el8eap.noarch eap7-sun-saaj-1.4-impl 1.4.1-1.SP1_redhat_00001.1.el8eap.noarch eap7-sun-ws-metadata-2.0-api 1.0.0-7.MR1_redhat_8.1.el8eap.noarch eap7-taglibs-standard-compat 1.2.6-2.RC1_redhat_1.1.el8eap.noarch eap7-taglibs-standard-impl 1.2.6-2.RC1_redhat_1.1.el8eap.noarch eap7-taglibs-standard-spec 1.2.6-2.RC1_redhat_1.1.el8eap.noarch eap7-thrift 0.13.0-1.redhat_00002.1.el8eap.noarch eap7-txw2 2.3.3-4.b02_redhat_00001.1.el8eap.noarch eap7-undertow 2.0.31-1.SP1_redhat_00001.1.el8eap.noarch eap7-undertow-jastow 2.0.8-1.Final_redhat_00001.1.el8eap.noarch eap7-undertow-js 1.0.2-2.Final_redhat_1.1.el8eap.noarch eap7-undertow-server 1.6.2-1.Final_redhat_00001.1.el8eap.noarch eap7-vdx-core 1.1.6-2.redhat_1.1.el8eap.noarch eap7-vdx-wildfly 1.1.6-2.redhat_1.1.el8eap.noarch eap7-velocity 2.2.0-1.redhat_00001.1.el8eap.noarch eap7-velocity-engine-core 2.2.0-1.redhat_00001.1.el8eap.noarch eap7-weld-cdi-2.0-api 2.0.2-2.redhat_00002.1.el8eap.noarch eap7-weld-core-impl 3.1.4-1.Final_redhat_00001.1.el8eap.noarch eap7-weld-core-jsf 3.1.4-1.Final_redhat_00001.1.el8eap.noarch eap7-weld-ejb 3.1.4-1.Final_redhat_00001.1.el8eap.noarch eap7-weld-jta 3.1.4-1.Final_redhat_00001.1.el8eap.noarch eap7-weld-probe-core 3.1.4-1.Final_redhat_00001.1.el8eap.noarch eap7-weld-web 3.1.4-1.Final_redhat_00001.1.el8eap.noarch eap7-wildfly 7.3.3-4.GA_redhat_00004.1.el8eap.noarch eap7-wildfly-client-config 1.0.1-2.Final_redhat_00001.1.el8eap.noarch eap7-wildfly-common 1.5.2-1.Final_redhat_00002.1.el8eap.noarch eap7-wildfly-discovery-client 1.2.0-1.Final_redhat_00001.1.el8eap.noarch eap7-wildfly-elytron 1.10.8-1.Final_redhat_00001.1.el8eap.noarch eap7-wildfly-elytron-tool 1.10.8-1.Final_redhat_00001.1.el8eap.noarch eap7-wildfly-http-client-common 1.0.22-1.Final_redhat_00001.1.el8eap.noarch eap7-wildfly-http-ejb-client 1.0.22-1.Final_redhat_00001.1.el8eap.noarch eap7-wildfly-http-naming-client 1.0.22-1.Final_redhat_00001.1.el8eap.noarch eap7-wildfly-http-transaction-client 1.0.22-1.Final_redhat_00001.1.el8eap.noarch eap7-wildfly-modules 
7.3.3-4.GA_redhat_00004.1.el8eap.noarch eap7-wildfly-naming-client 1.0.13-1.Final_redhat_00001.1.el8eap.noarch eap7-wildfly-openssl-java 1.0.9-2.SP03_redhat_00001.1.el8eap.noarch eap7-wildfly-openssl-linux-x86_64 1.0.12-1.Final_redhat_00001.1.el8eap.x86_64 eap7-wildfly-transaction-client 1.1.13-1.Final_redhat_00001.1.el8eap.noarch eap7-woodstox-core 6.0.3-1.redhat_00001.1.el8eap.noarch eap7-ws-commons-XmlSchema 2.2.5-1.redhat_00001.1.el8eap.noarch eap7-wsdl4j 1.6.3-13.redhat_2.1.el8eap.noarch eap7-wss4j-bindings 2.2.5-1.redhat_00001.1.el8eap.noarch eap7-wss4j-policy 2.2.5-1.redhat_00001.1.el8eap.noarch eap7-wss4j-ws-security-common 2.2.5-1.redhat_00001.1.el8eap.noarch eap7-wss4j-ws-security-dom 2.2.5-1.redhat_00001.1.el8eap.noarch eap7-wss4j-ws-security-policy-stax 2.2.5-1.redhat_00001.1.el8eap.noarch eap7-wss4j-ws-security-stax 2.2.5-1.redhat_00001.1.el8eap.noarch eap7-xalan-j2 2.7.1-35.redhat_12.1.el8eap.noarch eap7-xerces-j2 2.12.0-2.SP03_redhat_00001.1.el8eap.noarch eap7-xml-resolver 1.2.0-7.redhat_12.1.el8eap.noarch eap7-xml-security 2.1.4-1.redhat_00001.1.el8eap.noarch eap7-xom 1.2.10-4.redhat_1.1.el8eap.noarch eap7-xsom 2.3.3-4.b02_redhat_00001.1.el8eap.noarch eap7-yasson 1.0.5-1.redhat_00001.1.el8eap.noarch ebay-cors-filter 1.0.1-4.el8ev.noarch efi-srpm-macros 3-2.el8.noarch elfutils 0.180-1.el8.x86_64 elfutils-debuginfod-client 0.180-1.el8.x86_64 elfutils-default-yama-scope 0.180-1.el8.noarch elfutils-libelf 0.180-1.el8.x86_64 elfutils-libs 0.180-1.el8.x86_64 emacs-filesystem 26.1-5.el8.noarch engine-db-query 1.6.2-1.el8ev.noarch environment-modules 4.5.2-1.el8.x86_64 ethtool 5.0-2.el8.x86_64 expat 2.2.5-4.el8.x86_64 file 5.33-16.el8.x86_64 file-libs 5.33-16.el8.x86_64 filesystem 3.8-3.el8.x86_64 findutils 4.6.0-20.el8.x86_64 firewalld 0.8.2-2.el8.noarch firewalld-filesystem 0.8.2-2.el8.noarch fontconfig 2.13.1-3.el8.x86_64 fontpackages-filesystem 1.44-22.el8.noarch freetype 2.9.1-4.el8_3.1.x86_64 fribidi 1.0.4-8.el8.x86_64 fuse-libs 2.9.7-12.el8.x86_64 gawk 4.2.1-1.el8.x86_64 gc 7.6.4-3.el8.x86_64 gd 2.2.5-7.el8.x86_64 gdb-headless 8.2-12.el8.x86_64 gdbm 1.18-1.el8.x86_64 gdbm-libs 1.18-1.el8.x86_64 gdk-pixbuf2 2.36.12-5.el8.x86_64 gdk-pixbuf2-modules 2.36.12-5.el8.x86_64 geolite2-city 20180605-1.el8.noarch geolite2-country 20180605-1.el8.noarch gettext 0.19.8.1-17.el8.x86_64 gettext-libs 0.19.8.1-17.el8.x86_64 ghc-srpm-macros 1.4.2-7.el8.noarch giflib 5.1.4-3.el8.x86_64 git-core 2.27.0-1.el8.x86_64 glassfish-fastinfoset 1.2.13-9.module+el8.1.0+3366+6dfb954c.noarch glassfish-jaxb-api 2.2.12-8.module+el8.1.0+3366+6dfb954c.noarch glassfish-jaxb-core 2.2.11-11.module+el8.1.0+3366+6dfb954c.noarch glassfish-jaxb-runtime 2.2.11-11.module+el8.1.0+3366+6dfb954c.noarch glassfish-jaxb-txw2 2.2.11-11.module+el8.1.0+3366+6dfb954c.noarch glib-networking 2.56.1-1.1.el8.x86_64 glib2 2.56.4-8.el8.x86_64 glibc 2.28-127.el8.x86_64 glibc-common 2.28-127.el8.x86_64 glibc-langpack-en 2.28-127.el8.x86_64 gmp 6.1.2-10.el8.x86_64 gnupg2 2.2.20-2.el8.x86_64 gnupg2-smime 2.2.20-2.el8.x86_64 gnutls 3.6.14-6.el8.x86_64 gnutls-dane 3.6.14-6.el8.x86_64 gnutls-utils 3.6.14-6.el8.x86_64 go-srpm-macros 2-16.el8.noarch gobject-introspection 1.56.1-1.el8.x86_64 google-droid-sans-fonts 20120715-13.el8.noarch gpgme 1.13.1-3.el8.x86_64 grafana 6.7.4-3.el8.x86_64 grafana-postgres 6.7.4-3.el8.x86_64 graphite2 1.3.10-10.el8.x86_64 graphviz 2.40.1-40.el8.x86_64 grep 3.1-6.el8.x86_64 groff-base 1.22.3-18.el8.x86_64 grub2-common 2.02-90.el8.noarch grub2-pc 2.02-90.el8.x86_64 grub2-pc-modules 2.02-90.el8.noarch grub2-tools 
2.02-90.el8.x86_64 grub2-tools-extra 2.02-90.el8.x86_64 grub2-tools-minimal 2.02-90.el8.x86_64 grubby 8.40-41.el8.x86_64 gsettings-desktop-schemas 3.32.0-5.el8.x86_64 gssproxy 0.8.0-16.el8.x86_64 gtk-update-icon-cache 3.22.30-6.el8.x86_64 gtk2 2.24.32-4.el8.x86_64 guile 2.0.14-7.el8.x86_64 gzip 1.9-9.el8.x86_64 hardlink 1.3-6.el8.x86_64 harfbuzz 1.7.5-3.el8.x86_64 hdparm 9.54-2.el8.x86_64 hicolor-icon-theme 0.17-2.el8.noarch hostname 3.20-6.el8.x86_64 httpcomponents-client 4.5.5-4.module+el8+2598+06babf2e.noarch httpcomponents-core 4.4.10-3.module+el8+2598+06babf2e.noarch httpd 2.4.37-30.module+el8.3.0+7001+0766b9e7.x86_64 httpd-filesystem 2.4.37-30.module+el8.3.0+7001+0766b9e7.noarch httpd-tools 2.4.37-30.module+el8.3.0+7001+0766b9e7.x86_64 hwdata 0.314-8.6.el8.noarch ima-evm-utils 1.1-5.el8.x86_64 info 6.5-6.el8.x86_64 initscripts 10.00.9-1.el8.x86_64 insights-client 3.1.0-3.el8.noarch ipcalc 0.2.4-4.el8.x86_64 iproute 5.3.0-5.el8.x86_64 iprutils 2.4.19-1.el8.x86_64 ipset 7.1-1.el8.x86_64 ipset-libs 7.1-1.el8.x86_64 iptables 1.8.4-15.el8.x86_64 iptables-ebtables 1.8.4-15.el8.x86_64 iptables-libs 1.8.4-15.el8.x86_64 iputils 20180629-2.el8.x86_64 irqbalance 1.4.0-4.el8.x86_64 istack-commons-runtime 2.21-9.el8+7.noarch iwl100-firmware 39.31.5.1-99.el8.1.noarch iwl1000-firmware 39.31.5.1-99.el8.1.noarch iwl105-firmware 18.168.6.1-99.el8.1.noarch iwl135-firmware 18.168.6.1-99.el8.1.noarch iwl2000-firmware 18.168.6.1-99.el8.1.noarch iwl2030-firmware 18.168.6.1-99.el8.1.noarch iwl3160-firmware 25.30.13.0-99.el8.1.noarch iwl3945-firmware 15.32.2.9-99.el8.1.noarch iwl4965-firmware 228.61.2.24-99.el8.1.noarch iwl5000-firmware 8.83.5.1_1-99.el8.1.noarch iwl5150-firmware 8.24.2.2-99.el8.1.noarch iwl6000-firmware 9.221.4.1-99.el8.1.noarch iwl6000g2a-firmware 18.168.6.1-99.el8.1.noarch iwl6050-firmware 41.28.5.1-99.el8.1.noarch iwl7260-firmware 25.30.13.0-99.el8.1.noarch jackson-annotations 2.10.0-1.module+el8.2.0+5059+3eb3af25.noarch jackson-core 2.10.0-1.module+el8.2.0+5059+3eb3af25.noarch jackson-databind 2.10.0-1.module+el8.2.0+5059+3eb3af25.noarch jackson-jaxrs-json-provider 2.9.9-1.module+el8.1.0+3832+9784644d.noarch jackson-jaxrs-providers 2.9.9-1.module+el8.1.0+3832+9784644d.noarch jackson-module-jaxb-annotations 2.7.6-4.module+el8.1.0+3366+6dfb954c.noarch jansson 2.11-3.el8.x86_64 jasper-libs 2.0.14-4.el8.x86_64 java-1.8.0-openjdk 1.8.0.272.b10-3.el8_3.x86_64 java-1.8.0-openjdk-headless 1.8.0.272.b10-3.el8_3.x86_64 java-11-openjdk-headless 11.0.9.11-2.el8_3.x86_64 java-client-kubevirt 0.5.0-1.el8ev.noarch javapackages-filesystem 5.3.0-2.module+el8+2598+06babf2e.noarch javapackages-tools 5.3.0-2.module+el8+2598+06babf2e.noarch jbig2dec-libs 0.14-4.el8_2.x86_64 jbigkit-libs 2.1-14.el8.x86_64 jboss-annotations-1.2-api 1.0.0-4.el8.noarch jboss-jaxrs-2.0-api 1.0.0-6.el8.noarch jboss-logging 3.3.0-5.el8.noarch jboss-logging-tools 2.0.1-6.el8.noarch jcl-over-slf4j 1.7.25-4.module+el8.1.0+3366+6dfb954c.noarch jdeparser 2.0.0-5.el8.noarch jq 1.5-12.el8.x86_64 json-c 0.13.1-0.2.el8.x86_64 json-glib 1.4.4-1.el8.x86_64 kbd 2.0.4-10.el8.x86_64 kbd-legacy 2.0.4-10.el8.noarch kbd-misc 2.0.4-10.el8.noarch kernel 4.18.0-240.1.1.el8_3.x86_64 kernel-core 4.18.0-240.1.1.el8_3.x86_64 kernel-modules 4.18.0-240.1.1.el8_3.x86_64 kernel-tools 4.18.0-240.1.1.el8_3.x86_64 kernel-tools-libs 4.18.0-240.1.1.el8_3.x86_64 kexec-tools 2.0.20-34.el8.x86_64 keyutils 1.5.10-6.el8.x86_64 keyutils-libs 1.5.10-6.el8.x86_64 kmod 25-16.el8.x86_64 kmod-libs 25-16.el8.x86_64 kpartx 0.8.4-5.el8.x86_64 krb5-libs 1.18.2-5.el8.x86_64 
langpacks-en 1.0-12.el8.noarch lcms2 2.9-2.el8.x86_64 less 530-1.el8.x86_64 libICE 1.0.9-15.el8.x86_64 libSM 1.2.3-1.el8.x86_64 libX11 1.6.8-3.el8.x86_64 libX11-common 1.6.8-3.el8.noarch libXau 1.0.9-3.el8.x86_64 libXaw 1.0.13-10.el8.x86_64 libXcomposite 0.4.4-14.el8.x86_64 libXcursor 1.1.15-3.el8.x86_64 libXdamage 1.1.4-14.el8.x86_64 libXext 1.3.4-1.el8.x86_64 libXfixes 5.0.3-7.el8.x86_64 libXft 2.3.3-1.el8.x86_64 libXi 1.7.10-1.el8.x86_64 libXinerama 1.1.4-1.el8.x86_64 libXmu 1.1.3-1.el8.x86_64 libXpm 3.5.12-8.el8.x86_64 libXrandr 1.5.2-1.el8.x86_64 libXrender 0.9.10-7.el8.x86_64 libXt 1.1.5-12.el8.x86_64 libXtst 1.2.3-7.el8.x86_64 libXxf86misc 1.0.4-1.el8.x86_64 libXxf86vm 1.1.4-9.el8.x86_64 libacl 2.2.53-1.el8.x86_64 libaio 0.3.112-1.el8.x86_64 libappstream-glib 0.7.14-3.el8.x86_64 libarchive 3.3.2-9.el8.x86_64 libassuan 2.5.1-3.el8.x86_64 libatomic_ops 7.6.2-3.el8.x86_64 libattr 2.4.48-3.el8.x86_64 libbabeltrace 1.5.4-3.el8.x86_64 libbasicobjects 0.1.1-39.el8.x86_64 libblkid 2.32.1-24.el8.x86_64 libcap 2.26-4.el8.x86_64 libcap-ng 0.7.9-5.el8.x86_64 libcollection 0.7.0-39.el8.x86_64 libcom_err 1.45.6-1.el8.x86_64 libcomps 0.1.11-4.el8.x86_64 libcroco 0.6.12-4.el8_2.1.x86_64 libcurl 7.61.1-14.el8.x86_64 libdaemon 0.14-15.el8.x86_64 libdatrie 0.2.9-7.el8.x86_64 libdb 5.3.28-39.el8.x86_64 libdb-utils 5.3.28-39.el8.x86_64 libdhash 0.5.0-39.el8.x86_64 libdnf 0.48.0-5.el8.x86_64 libedit 3.1-23.20170329cvs.el8.x86_64 libestr 0.1.10-1.el8.x86_64 libevent 2.1.8-5.el8.x86_64 libfastjson 0.99.8-2.el8.x86_64 libfdisk 2.32.1-24.el8.x86_64 libffi 3.1-22.el8.x86_64 libfontenc 1.1.3-8.el8.x86_64 libgcc 8.3.1-5.1.el8.x86_64 libgcrypt 1.8.5-4.el8.x86_64 libgfortran 8.3.1-5.1.el8.x86_64 libgomp 8.3.1-5.1.el8.x86_64 libgpg-error 1.31-1.el8.x86_64 libgs 9.25-7.el8.x86_64 libicu 60.3-2.el8_1.x86_64 libidn 1.34-5.el8.x86_64 libidn2 2.2.0-1.el8.x86_64 libijs 0.35-5.el8.x86_64 libini_config 1.3.1-39.el8.x86_64 libipt 1.6.1-8.el8.x86_64 libjpeg-turbo 1.5.3-10.el8.x86_64 libkcapi 1.2.0-2.el8.x86_64 libkcapi-hmaccalc 1.2.0-2.el8.x86_64 libksba 1.3.5-7.el8.x86_64 libldb 2.1.3-2.el8.x86_64 liblognorm 2.0.5-1.el8.x86_64 libmaxminddb 1.2.0-10.el8.x86_64 libmcpp 2.7.2-20.el8.x86_64 libmetalink 0.1.3-7.el8.x86_64 libmnl 1.0.4-6.el8.x86_64 libmodman 2.0.1-17.el8.x86_64 libmodulemd 2.9.4-2.el8.x86_64 libmount 2.32.1-24.el8.x86_64 libndp 1.7-3.el8.x86_64 libnetfilter_conntrack 1.0.6-5.el8.x86_64 libnfnetlink 1.0.1-13.el8.x86_64 libnfsidmap 2.3.3-35.el8.x86_64 libnftnl 1.1.5-4.el8.x86_64 libnghttp2 1.33.0-3.el8_2.1.x86_64 libnl3 3.5.0-1.el8.x86_64 libnl3-cli 3.5.0-1.el8.x86_64 libnsl2 1.2.0-2.20180605git4a062cf.el8.x86_64 libpaper 1.1.24-22.el8.x86_64 libpath_utils 0.2.1-39.el8.x86_64 libpcap 1.9.1-4.el8.x86_64 libpipeline 1.5.0-2.el8.x86_64 libpkgconf 1.4.2-1.el8.x86_64 libpng 1.6.34-5.el8.x86_64 libpq 12.4-1.el8_2.x86_64 libproxy 0.4.15-5.2.el8.x86_64 libpsl 0.20.2-6.el8.x86_64 libpwquality 1.4.0-9.el8.x86_64 libquadmath 8.3.1-5.1.el8.x86_64 libref_array 0.1.5-39.el8.x86_64 librepo 1.12.0-2.el8.x86_64 libreport-filesystem 2.9.5-15.el8.x86_64 librhsm 0.0.3-3.el8.x86_64 librsvg2 2.42.7-4.el8.x86_64 libseccomp 2.4.3-1.el8.x86_64 libsecret 0.18.6-1.el8.x86_64 libselinux 2.9-4.el8_3.x86_64 libselinux-utils 2.9-4.el8_3.x86_64 libsemanage 2.9-3.el8.x86_64 libsepol 2.9-1.el8.x86_64 libsigsegv 2.11-5.el8.x86_64 libsmartcols 2.32.1-24.el8.x86_64 libsodium 1.0.18-2.el8ev.x86_64 libsolv 0.7.11-1.el8.x86_64 libsoup 2.62.3-2.el8.x86_64 libss 1.45.6-1.el8.x86_64 libssh 0.9.4-2.el8.x86_64 libssh-config 0.9.4-2.el8.noarch libsss_autofs 
2.2.3-20.el8.x86_64 libsss_certmap 2.2.3-20.el8.x86_64 libsss_idmap 2.2.3-20.el8.x86_64 libsss_nss_idmap 2.2.3-20.el8.x86_64 libsss_sudo 2.2.3-20.el8.x86_64 libstdc++ 8.3.1-5.1.el8.x86_64 libstemmer 0-10.585svn.el8.x86_64 libsysfs 2.1.0-24.el8.x86_64 libtalloc 2.3.1-2.el8.x86_64 libtasn1 4.13-3.el8.x86_64 libtdb 1.4.3-1.el8.x86_64 libteam 1.31-2.el8.x86_64 libtevent 0.10.2-2.el8.x86_64 libthai 0.1.27-2.el8.x86_64 libtiff 4.0.9-18.el8.x86_64 libtirpc 1.1.4-4.el8.x86_64 libtool-ltdl 2.4.6-25.el8.x86_64 libunistring 0.9.9-3.el8.x86_64 libusbx 1.0.23-4.el8.x86_64 libuser 0.62-23.el8.x86_64 libutempter 1.1.6-14.el8.x86_64 libuuid 2.32.1-24.el8.x86_64 libverto 0.3.0-5.el8.x86_64 libverto-libevent 0.3.0-5.el8.x86_64 libwebp 1.0.0-1.el8.x86_64 libxcb 1.13.1-1.el8.x86_64 libxcrypt 4.1.1-4.el8.x86_64 libxkbcommon 0.9.1-1.el8.x86_64 libxml2 2.9.7-8.el8.x86_64 libxslt 1.1.32-5.el8.x86_64 libyaml 0.1.7-5.el8.x86_64 libzstd 1.4.4-1.el8.x86_64 lksctp-tools 1.0.18-3.el8.x86_64 log4j12 1.2.17-22.el8ev.noarch logrotate 3.14.0-4.el8.x86_64 lshw B.02.19.2-2.el8.x86_64 lsscsi 0.30-1.el8.x86_64 lua 5.3.4-11.el8.x86_64 lua-libs 5.3.4-11.el8.x86_64 lvm2 2.03.09-5.el8.x86_64 lvm2-libs 2.03.09-5.el8.x86_64 lz4-libs 1.8.3-2.el8.x86_64 lzo 2.08-14.el8.x86_64 mailcap 2.1.48-3.el8.noarch man-db 2.7.6.1-17.el8.x86_64 mcpp 2.7.2-20.el8.x86_64 memstrack 0.1.11-1.el8.x86_64 microcode_ctl 20200609-2.20201027.1.el8_3.x86_64 mod_auth_gssapi 1.6.1-6.el8.x86_64 mod_auth_openidc 2.3.7-4.module+el8.2.0+6919+ac02cfd2.3.x86_64 mod_http2 1.15.7-2.module+el8.3.0+7670+8bf57d29.x86_64 mod_session 2.4.37-30.module+el8.3.0+7001+0766b9e7.x86_64 mod_ssl 2.4.37-30.module+el8.3.0+7001+0766b9e7.x86_64 mozjs60 60.9.0-4.el8.x86_64 mpfr 3.1.6-1.el8.x86_64 ncurses 6.1-7.20180224.el8.x86_64 ncurses-base 6.1-7.20180224.el8.noarch ncurses-libs 6.1-7.20180224.el8.x86_64 net-tools 2.0-0.51.20160912git.el8.x86_64 nettle 3.4.1-2.el8.x86_64 newt 0.52.20-11.el8.x86_64 nfs-utils 2.3.3-35.el8.x86_64 nftables 0.9.3-16.el8.x86_64 nodejs 14.11.0-1.module+el8.3.0+8180+4125ea5c.x86_64 novnc 1.1.0-1.el8ost.noarch npth 1.5-4.el8.x86_64 numactl-libs 2.0.12-11.el8.x86_64 ocaml-srpm-macros 5-4.el8.noarch oddjob 0.34.5-3.el8.x86_64 oddjob-mkhomedir 0.34.5-3.el8.x86_64 ongres-scram 1.0.0~beta.2-5.el8.noarch ongres-scram-client 1.0.0~beta.2-5.el8.noarch oniguruma 6.8.2-2.el8.x86_64 openblas 0.3.3-5.el8.x86_64 openblas-srpm-macros 2-2.el8.noarch openblas-threads 0.3.3-5.el8.x86_64 openjpeg2 2.3.1-6.el8.x86_64 openldap 2.4.46-15.el8.x86_64 opensc 0.20.0-2.el8.x86_64 openscap 1.3.3-5.el8.x86_64 openscap-scanner 1.3.3-5.el8.x86_64 openscap-utils 1.3.3-5.el8.x86_64 openssh 8.0p1-5.el8.x86_64 openssh-clients 8.0p1-5.el8.x86_64 openssh-server 8.0p1-5.el8.x86_64 openssl 1.1.1g-11.el8.x86_64 openssl-libs 1.1.1g-11.el8.x86_64 openssl-pkcs11 0.4.10-2.el8.x86_64 openstack-java-cinder-client 3.2.9-1.el8ev.noarch openstack-java-cinder-model 3.2.9-1.el8ev.noarch openstack-java-client 3.2.9-1.el8ev.noarch openstack-java-glance-client 3.2.9-1.el8ev.noarch openstack-java-glance-model 3.2.9-1.el8ev.noarch openstack-java-keystone-client 3.2.9-1.el8ev.noarch openstack-java-keystone-model 3.2.9-1.el8ev.noarch openstack-java-quantum-client 3.2.9-1.el8ev.noarch openstack-java-quantum-model 3.2.9-1.el8ev.noarch openstack-java-resteasy-connector 3.2.9-1.el8ev.noarch openvswitch-selinux-extra-policy 1.0-22.el8fdp.noarch openvswitch2.11 2.11.3-68.el8fdp.x86_64 os-prober 1.74-6.el8.x86_64 otopi-common 1.9.2-1.el8ev.noarch ovirt-ansible-collection 1.2.2-1.el8ev.noarch ovirt-cockpit-sso 
0.1.4-1.el8ev.noarch ovirt-engine 4.4.3.12-0.1.el8ev.noarch ovirt-engine-backend 4.4.3.12-0.1.el8ev.noarch ovirt-engine-dbscripts 4.4.3.12-0.1.el8ev.noarch ovirt-engine-dwh 4.4.3.2-1.el8ev.noarch ovirt-engine-dwh-grafana-integration-setup 4.4.3.2-1.el8ev.noarch ovirt-engine-dwh-setup 4.4.3.2-1.el8ev.noarch ovirt-engine-extension-aaa-jdbc 1.2.0-1.el8ev.noarch ovirt-engine-extension-aaa-ldap 1.4.2-1.el8ev.noarch ovirt-engine-extension-aaa-ldap-setup 1.4.2-1.el8ev.noarch ovirt-engine-extension-aaa-misc 1.1.0-1.el8ev.noarch ovirt-engine-extension-logger-log4j 1.1.1-1.el8ev.noarch ovirt-engine-extensions-api 1.0.1-1.el8ev.noarch ovirt-engine-metrics 1.4.2.2-1.el8ev.noarch ovirt-engine-restapi 4.4.3.12-0.1.el8ev.noarch ovirt-engine-setup 4.4.3.12-0.1.el8ev.noarch ovirt-engine-setup-base 4.4.3.12-0.1.el8ev.noarch ovirt-engine-setup-plugin-cinderlib 4.4.3.12-0.1.el8ev.noarch ovirt-engine-setup-plugin-imageio 4.4.3.12-0.1.el8ev.noarch ovirt-engine-setup-plugin-ovirt-engine 4.4.3.12-0.1.el8ev.noarch ovirt-engine-setup-plugin-ovirt-engine-common 4.4.3.12-0.1.el8ev.noarch ovirt-engine-setup-plugin-vmconsole-proxy-helper 4.4.3.12-0.1.el8ev.noarch ovirt-engine-setup-plugin-websocket-proxy 4.4.3.12-0.1.el8ev.noarch ovirt-engine-tools 4.4.3.12-0.1.el8ev.noarch ovirt-engine-tools-backup 4.4.3.12-0.1.el8ev.noarch ovirt-engine-ui-extensions 1.2.4-1.el8ev.noarch ovirt-engine-vmconsole-proxy-helper 4.4.3.12-0.1.el8ev.noarch ovirt-engine-webadmin-portal 4.4.3.12-0.1.el8ev.noarch ovirt-engine-websocket-proxy 4.4.3.12-0.1.el8ev.noarch ovirt-imageio-common 2.1.1-1.el8ev.x86_64 ovirt-imageio-daemon 2.1.1-1.el8ev.x86_64 ovirt-log-collector 4.4.4-1.el8ev.noarch ovirt-provider-ovn 1.2.32-1.el8ev.noarch ovirt-vmconsole 1.0.8-1.el8ev.noarch ovirt-vmconsole-proxy 1.0.8-1.el8ev.noarch ovirt-web-ui 1.6.5-1.el8ev.noarch ovn2.11 2.11.1-54.el8fdp.x86_64 ovn2.11-central 2.11.1-54.el8fdp.x86_64 p11-kit 0.23.14-5.el8_0.x86_64 p11-kit-trust 0.23.14-5.el8_0.x86_64 pam 1.3.1-11.el8.x86_64 pango 1.42.4-6.el8.x86_64 parted 3.2-38.el8.x86_64 passwd 0.80-3.el8.x86_64 patch 2.7.6-11.el8.x86_64 pciutils 3.6.4-2.el8.x86_64 pciutils-libs 3.6.4-2.el8.x86_64 pcre 8.42-4.el8.x86_64 pcre2 10.32-2.el8.x86_64 pcsc-lite 1.8.23-3.el8.x86_64 pcsc-lite-ccid 1.4.29-4.el8.x86_64 pcsc-lite-libs 1.8.23-3.el8.x86_64 perl-Carp 1.42-396.el8.noarch perl-Data-Dumper 2.167-399.el8.x86_64 perl-Digest 1.17-395.el8.noarch perl-Digest-MD5 2.55-396.el8.x86_64 perl-Encode 2.97-3.el8.x86_64 perl-Errno 1.28-416.el8.x86_64 perl-Exporter 5.72-396.el8.noarch perl-File-Path 2.15-2.el8.noarch perl-File-Temp 0.230.600-1.el8.noarch perl-Getopt-Long 2.50-4.el8.noarch perl-HTTP-Tiny 0.074-1.el8.noarch perl-IO 1.38-416.el8.x86_64 perl-IO-Socket-IP 0.39-5.el8.noarch perl-IO-Socket-SSL 2.066-4.el8.noarch perl-MIME-Base64 3.15-396.el8.x86_64 perl-Mozilla-CA 20160104-7.el8.noarch perl-Net-SSLeay 1.88-1.el8.x86_64 perl-PathTools 3.74-1.el8.x86_64 perl-Pod-Escapes 1.07-395.el8.noarch perl-Pod-Perldoc 3.28-396.el8.noarch perl-Pod-Simple 3.35-395.el8.noarch perl-Pod-Usage 1.69-395.el8.noarch perl-Scalar-List-Utils 1.49-2.el8.x86_64 perl-Socket 2.027-3.el8.x86_64 perl-Storable 3.11-3.el8.x86_64 perl-Term-ANSIColor 4.06-396.el8.noarch perl-Term-Cap 1.17-395.el8.noarch perl-Text-ParseWords 3.30-395.el8.noarch perl-Text-Tabs+Wrap 2013.0523-395.el8.noarch perl-Time-Local 1.280-1.el8.noarch perl-URI 1.73-3.el8.noarch perl-Unicode-Normalize 1.25-396.el8.x86_64 perl-constant 1.33-396.el8.noarch perl-interpreter 5.26.3-416.el8.x86_64 perl-libnet 3.11-3.el8.noarch perl-libs 
5.26.3-416.el8.x86_64 perl-macros 5.26.3-416.el8.x86_64 perl-parent 0.237-1.el8.noarch perl-podlators 4.11-1.el8.noarch perl-srpm-macros 1-25.el8.noarch perl-threads 2.21-2.el8.x86_64 perl-threads-shared 1.58-2.el8.x86_64 pigz 2.4-4.el8.x86_64 pinentry 1.1.0-2.el8.x86_64 pixman 0.38.4-1.el8.x86_64 pkgconf 1.4.2-1.el8.x86_64 pkgconf-m4 1.4.2-1.el8.noarch pkgconf-pkg-config 1.4.2-1.el8.x86_64 pki-servlet-4.0-api 9.0.30-1.module+el8.3.0+6730+8f9c6254.noarch platform-python 3.6.8-31.el8.x86_64 platform-python-pip 9.0.3-18.el8.noarch platform-python-setuptools 39.2.0-6.el8.noarch policycoreutils 2.9-9.el8.x86_64 policycoreutils-python-utils 2.9-9.el8.noarch polkit 0.115-11.el8.x86_64 polkit-libs 0.115-11.el8.x86_64 polkit-pkla-compat 0.1-12.el8.x86_64 popt 1.16-14.el8.x86_64 postgresql 12.1-2.module+el8.1.1+4794+c82b6e09.x86_64 postgresql-contrib 12.1-2.module+el8.1.1+4794+c82b6e09.x86_64 postgresql-jdbc 42.2.3-3.el8_2.noarch postgresql-server 12.1-2.module+el8.1.1+4794+c82b6e09.x86_64 prefixdevname 0.1.0-6.el8.x86_64 procps-ng 3.3.15-3.el8.x86_64 psmisc 23.1-5.el8.x86_64 publicsuffix-list 20180723-1.el8.noarch publicsuffix-list-dafsa 20180723-1.el8.noarch python-srpm-macros 3-39.el8.noarch python3-aniso8601 0.82-4.el8ost.noarch python3-ansible-runner 1.4.5-1.el8ar.noarch python3-asn1crypto 0.24.0-3.el8.noarch python3-audit 3.0-0.17.20191104git1c2f876.el8.x86_64 python3-babel 2.5.1-5.el8.noarch python3-bcrypt 3.1.6-2.el8ev.x86_64 python3-bind 9.11.20-5.el8.noarch python3-cairo 1.16.3-6.el8.x86_64 python3-cffi 1.11.5-5.el8.x86_64 python3-chardet 3.0.4-7.el8.noarch python3-click 6.7-8.el8.noarch python3-configobj 5.0.6-11.el8.noarch python3-cryptography 2.3-3.el8.x86_64 python3-daemon 2.1.2-9.el8ar.noarch python3-dateutil 2.6.1-6.el8.noarch python3-dbus 1.2.4-15.el8.x86_64 python3-decorator 4.2.1-2.el8.noarch python3-dmidecode 3.12.2-15.el8.x86_64 python3-dnf 4.2.23-4.el8.noarch python3-dnf-plugin-versionlock 4.0.17-5.el8.noarch python3-dnf-plugins-core 4.0.17-5.el8.noarch python3-docutils 0.14-12.module+el8.1.0+3334+5cb623d7.noarch python3-ethtool 0.14-3.el8.x86_64 python3-firewall 0.8.2-2.el8.noarch python3-flask 1.0.2-2.el8ost.noarch python3-flask-restful 0.3.6-8.el8ost.noarch python3-gobject 3.28.3-2.el8.x86_64 python3-gobject-base 3.28.3-2.el8.x86_64 python3-gpg 1.13.1-3.el8.x86_64 python3-hawkey 0.48.0-5.el8.x86_64 python3-idna 2.5-5.el8.noarch python3-iniparse 0.4-31.el8.noarch python3-inotify 0.9.6-13.el8.noarch python3-itsdangerous 0.24-14.el8.noarch python3-jinja2 2.10.1-2.el8_0.noarch python3-jmespath 0.9.0-11.el8.noarch python3-jsonpatch 1.21-2.el8.noarch python3-jsonpointer 1.10-11.el8.noarch python3-jsonschema 2.6.0-4.el8.noarch python3-jwt 1.6.1-2.el8.noarch python3-ldap 3.1.0-5.el8.x86_64 python3-libcomps 0.1.11-4.el8.x86_64 python3-libdnf 0.48.0-5.el8.x86_64 python3-librepo 1.12.0-2.el8.x86_64 python3-libs 3.6.8-31.el8.x86_64 python3-libselinux 2.9-4.el8_3.x86_64 python3-libsemanage 2.9-3.el8.x86_64 python3-libxml2 2.9.7-8.el8.x86_64 python3-linux-procfs 0.6.2-2.el8.noarch python3-lockfile 0.11.0-8.el8ar.noarch python3-lxml 4.2.3-1.el8.x86_64 python3-m2crypto 0.35.2-5.el8ev.x86_64 python3-magic 5.33-16.el8.noarch python3-markupsafe 0.23-19.el8.x86_64 python3-mod_wsgi 4.6.4-4.el8.x86_64 python3-netaddr 0.7.19-8.1.el8ost.noarch python3-nftables 0.9.3-16.el8.x86_64 python3-notario 0.0.16-2.el8cp.noarch python3-numpy 1.14.3-9.el8.x86_64 python3-oauthlib 2.1.0-1.el8.noarch python3-openvswitch2.11 2.11.3-68.el8fdp.x86_64 python3-otopi 1.9.2-1.el8ev.noarch python3-ovirt-engine-lib 
4.4.3.12-0.1.el8ev.noarch python3-ovirt-engine-sdk4 4.4.7-1.el8ev.x86_64 python3-ovirt-setup-lib 1.3.2-1.el8ev.noarch python3-ovsdbapp 0.17.1-0.20191216120142.206cf14.el8ost.noarch python3-paramiko 2.4.3-2.el8ev.noarch python3-passlib 1.7.0-5.el8ost.noarch python3-pbr 5.1.2-2.el8ost.noarch python3-perf 4.18.0-240.1.1.el8_3.x86_64 python3-pexpect 4.6-2.el8ost.noarch python3-pip 9.0.3-18.el8.noarch python3-pip-wheel 9.0.3-18.el8.noarch python3-ply 3.9-8.el8.noarch python3-policycoreutils 2.9-9.el8.noarch python3-prettytable 0.7.2-14.el8.noarch python3-psutil 5.4.3-10.el8.x86_64 python3-psycopg2 2.7.5-7.el8.x86_64 python3-ptyprocess 0.5.2-4.el8.noarch python3-pwquality 1.4.0-9.el8.x86_64 python3-pyOpenSSL 18.0.0-1.el8.noarch python3-pyasn1 0.3.7-6.el8.noarch python3-pyasn1-modules 0.3.7-6.el8.noarch python3-pycparser 2.14-14.el8.noarch python3-pycurl 7.43.0.2-4.el8.x86_64 python3-pydbus 0.6.0-5.el8.noarch python3-pynacl 1.3.0-5.el8ev.x86_64 python3-pyserial 3.1.1-8.el8.noarch python3-pysocks 1.6.8-3.el8.noarch python3-pytz 2017.2-9.el8.noarch python3-pyudev 0.21.0-7.el8.noarch python3-pyyaml 3.12-12.el8.x86_64 python3-requests 2.20.0-2.1.el8_1.noarch python3-rpm 4.14.3-4.el8.x86_64 python3-rpm-macros 3-39.el8.noarch python3-schedutils 0.6-6.el8.x86_64 python3-setools 4.3.0-2.el8.x86_64 python3-setuptools 39.2.0-6.el8.noarch python3-setuptools-wheel 39.2.0-6.el8.noarch python3-six 1.12.0-1.el8ost.noarch python3-slip 0.6.4-11.el8.noarch python3-slip-dbus 0.6.4-11.el8.noarch python3-subscription-manager-rhsm 1.27.16-1.el8.x86_64 python3-syspurpose 1.27.16-1.el8.x86_64 python3-systemd 234-8.el8.x86_64 python3-unbound 1.7.3-14.el8.x86_64 python3-urllib3 1.24.2-4.el8.noarch python3-websocket-client 0.54.0-1.el8ost.noarch python3-websockify 0.8.0-12.el8ev.noarch python3-werkzeug 0.16.0-1.el8ost.noarch python36 3.6.8-2.module+el8.1.0+3334+5cb623d7.x86_64 qemu-guest-agent 4.2.0-34.module+el8.3.0+7976+077be4ec.x86_64 qt5-srpm-macros 5.12.5-3.el8.noarch quota 4.04-10.el8.x86_64 quota-nls 4.04-10.el8.noarch readline 7.0-10.el8.x86_64 redhat-logos 81.1-1.el8.x86_64 redhat-logos-httpd 81.1-1.el8.noarch redhat-release 8.3-1.0.el8.x86_64 redhat-release-eula 8.3-1.0.el8.x86_64 redhat-rpm-config 123-1.el8.noarch relaxngDatatype 2011.1-7.module+el8.1.0+3366+6dfb954c.noarch resteasy 3.0.26-3.module+el8.2.0+5723+4574fbff.noarch rhel-system-roles 1.0-20.el8.noarch rhsm-icons 1.27.16-1.el8.noarch rhv-log-collector-analyzer 1.0.5-1.el8ev.noarch rhv-openvswitch 2.11-7.el8ev.noarch rhv-openvswitch-ovn-central 2.11-7.el8ev.noarch rhv-openvswitch-ovn-common 2.11-7.el8ev.noarch rhv-python-openvswitch 2.11-7.el8ev.noarch rhvm 4.4.3.12-0.1.el8ev.noarch rhvm-branding-rhv 4.4.6-1.el8ev.noarch rhvm-dependencies 4.4.1-1.el8ev.noarch rhvm-setup-plugins 4.4.2-1.el8ev.noarch rng-tools 6.8-3.el8.x86_64 rootfiles 8.1-22.el8.noarch rpcbind 1.2.5-7.el8.x86_64 rpm 4.14.3-4.el8.x86_64 rpm-build 4.14.3-4.el8.x86_64 rpm-build-libs 4.14.3-4.el8.x86_64 rpm-libs 4.14.3-4.el8.x86_64 rpm-plugin-selinux 4.14.3-4.el8.x86_64 rpm-plugin-systemd-inhibit 4.14.3-4.el8.x86_64 rpmdevtools 8.10-8.el8.noarch rsync 3.1.3-9.el8.x86_64 rsyslog 8.1911.0-6.el8.x86_64 rsyslog-elasticsearch 8.1911.0-6.el8.x86_64 rsyslog-mmjsonparse 8.1911.0-6.el8.x86_64 rsyslog-mmnormalize 8.1911.0-6.el8.x86_64 rust-srpm-macros 5-2.el8.noarch scap-security-guide 0.1.48-1.el8ev.noarch scl-utils 2.0.2-12.el8.x86_64 sed 4.5-2.el8.x86_64 selinux-policy 3.14.3-54.el8.noarch selinux-policy-targeted 3.14.3-54.el8.noarch setroubleshoot-plugins 3.3.13-1.el8.noarch setroubleshoot-server 
3.3.24-1.el8.x86_64 setup 2.12.2-6.el8.noarch sg3_utils 1.44-5.el8.x86_64 sg3_utils-libs 1.44-5.el8.x86_64 sgml-common 0.6.3-50.el8.noarch shadow-utils 4.6-11.el8.x86_64 shared-mime-info 1.9-3.el8.x86_64 slang 2.3.2-3.el8.x86_64 slf4j 1.7.25-4.module+el8.1.0+3366+6dfb954c.noarch slf4j-jdk14 1.7.25-4.module+el8.1.0+3366+6dfb954c.noarch snappy 1.1.8-3.el8.x86_64 snmp4j 2.4.1-1.el8ev.noarch sos 3.9.1-6.el8.noarch source-highlight 3.1.8-16.el8.x86_64 spice-client-win-x64 8.3-2.el8.noarch spice-client-win-x86 8.3-2.el8.noarch sqlite 3.26.0-11.el8.x86_64 sqlite-libs 3.26.0-11.el8.x86_64 squashfs-tools 4.3-19.el8.x86_64 sscg 2.3.3-14.el8.x86_64 sshpass 1.06-3.el8ae.x86_64 sssd-client 2.2.3-20.el8.x86_64 sssd-common 2.2.3-20.el8.x86_64 sssd-kcm 2.2.3-20.el8.x86_64 sssd-nfs-idmap 2.2.3-20.el8.x86_64 stax-ex 1.7.7-8.module+el8.2.0+5723+4574fbff.noarch subscription-manager 1.27.16-1.el8.x86_64 subscription-manager-cockpit 1.27.16-1.el8.noarch subscription-manager-rhsm-certificates 1.27.16-1.el8.x86_64 sudo 1.8.29-6.el8.x86_64 systemd 239-41.el8_3.x86_64 systemd-libs 239-41.el8_3.x86_64 systemd-pam 239-41.el8_3.x86_64 systemd-udev 239-41.el8_3.x86_64 tar 1.30-5.el8.x86_64 tcl 8.6.8-2.el8.x86_64 tcpdump 4.9.3-1.el8.x86_64 teamd 1.31-2.el8.x86_64 timedatex 0.5-3.el8.x86_64 tmux 2.7-1.el8.x86_64 trousers 0.3.14-4.el8.x86_64 trousers-lib 0.3.14-4.el8.x86_64 ttmkfdir 3.0.9-54.el8.x86_64 tuned 2.14.0-3.el8.noarch tzdata 2020d-1.el8.noarch tzdata-java 2020d-1.el8.noarch unbound-libs 1.7.3-14.el8.x86_64 unboundid-ldapsdk 4.0.14-1.el8ev.noarch unzip 6.0-43.el8.x86_64 urw-base35-bookman-fonts 20170801-10.el8.noarch urw-base35-c059-fonts 20170801-10.el8.noarch urw-base35-d050000l-fonts 20170801-10.el8.noarch urw-base35-fonts 20170801-10.el8.noarch urw-base35-fonts-common 20170801-10.el8.noarch urw-base35-gothic-fonts 20170801-10.el8.noarch urw-base35-nimbus-mono-ps-fonts 20170801-10.el8.noarch urw-base35-nimbus-roman-fonts 20170801-10.el8.noarch urw-base35-nimbus-sans-fonts 20170801-10.el8.noarch urw-base35-p052-fonts 20170801-10.el8.noarch urw-base35-standard-symbols-ps-fonts 20170801-10.el8.noarch urw-base35-z003-fonts 20170801-10.el8.noarch usermode 1.113-1.el8.x86_64 util-linux 2.32.1-24.el8.x86_64 uuid 1.6.2-42.el8.x86_64 vdsm-jsonrpc-java 1.5.5-1.el8ev.noarch vim-filesystem 8.0.1763-13.el8.noarch vim-minimal 8.0.1763-13.el8.x86_64 virt-what 1.18-6.el8.x86_64 which 2.21-12.el8.x86_64 ws-commons-util 1.0.2-1.el8ev.noarch xfsprogs 5.0.0-4.el8.x86_64 xkeyboard-config 2.28-1.el8.noarch xml-common 0.6.3-50.el8.noarch xmlrpc-client 3.1.3-1.el8ev.noarch xmlrpc-common 3.1.3-1.el8ev.noarch xmlstreambuffer 1.5.4-8.module+el8.2.0+5723+4574fbff.noarch xorg-x11-font-utils 7.5-40.el8.x86_64 xorg-x11-fonts-ISO8859-1-100dpi 7.5-19.el8.noarch xorg-x11-fonts-Type1 7.5-19.el8.noarch xorg-x11-server-utils 7.7-27.el8.x86_64 xsom 0-19.20110809svn.module+el8.1.0+3366+6dfb954c.noarch xz 5.2.4-3.el8.x86_64 xz-libs 5.2.4-3.el8.x86_64 yajl 2.1.0-10.el8.x86_64 yum 4.2.23-4.el8.noarch yum-utils 4.0.17-5.el8.noarch zip 3.0-23.el8.x86_64 zlib 1.2.11-16.el8_2.x86_64 zstd 1.4.4-1.el8.x86_64

4.2. Red Hat Virtualization Host for RHEL 8 x86_64 (RPMs)

The following table outlines the packages included in the Red Hat Virtualization Host 4.4.3 image.

Table 4.2. Red Hat Virtualization Host for RHEL 8 x86_64 (RPMs)

Name Version

GConf2 3.2.6-22.el8.x86_64 NetworkManager 1.26.0-9.el8_3.x86_64 NetworkManager-config-server 1.26.0-9.el8_3.noarch NetworkManager-libnm 1.26.0-9.el8_3.x86_64 NetworkManager-ovs 1.26.0-9.el8_3.x86_64 NetworkManager-team 1.26.0-9.el8_3.x86_64 NetworkManager-tui 1.26.0-9.el8_3.x86_64 abattis-cantarell-fonts 0.0.25-4.el8.noarch abrt 2.10.9-20.el8.x86_64 abrt-addon-ccpp 2.10.9-20.el8.x86_64 abrt-addon-coredump-helper 2.10.9-20.el8.x86_64 abrt-addon-kerneloops 2.10.9-20.el8.x86_64 abrt-addon-pstoreoops 2.10.9-20.el8.x86_64 abrt-addon-vmcore 2.10.9-20.el8.x86_64 abrt-addon-xorg 2.10.9-20.el8.x86_64 abrt-cli 2.10.9-20.el8.x86_64 abrt-dbus 2.10.9-20.el8.x86_64 abrt-libs 2.10.9-20.el8.x86_64 abrt-tui 2.10.9-20.el8.x86_64 acl 2.2.53-1.el8.x86_64 aide 0.16-14.el8.x86_64 alsa-lib 1.2.3.2-1.el8.x86_64 ansible 2.9.14-1.el8ae.noarch attr 2.4.48-3.el8.x86_64 audispd-plugins 3.0-0.17.20191104git1c2f876.el8.x86_64 audit 3.0-0.17.20191104git1c2f876.el8.x86_64 audit-libs 3.0-0.17.20191104git1c2f876.el8.x86_64 augeas 1.12.0-5.el8.x86_64 augeas-libs 1.12.0-5.el8.x86_64 authselect 1.2.1-2.el8.x86_64 authselect-compat 1.2.1-2.el8.x86_64 authselect-libs 1.2.1-2.el8.x86_64 autofs 5.1.4-43.el8.x86_64 autogen-libopts 5.18.12-8.el8.x86_64 avahi-libs 0.7-19.el8.x86_64 basesystem 11-5.el8.noarch bash 4.4.19-12.el8.x86_64 bc 1.07.1-5.el8.x86_64 bind-export-libs 9.11.20-5.el8.x86_64 bind-libs 9.11.20-5.el8.x86_64 bind-libs-lite 9.11.20-5.el8.x86_64 bind-license 9.11.20-5.el8.noarch bind-utils 9.11.20-5.el8.x86_64 binutils 2.30-79.el8.x86_64 biosdevname 0.7.3-2.el8.x86_64 blivet-data 3.2.2-6.el8.noarch boost-atomic 1.66.0-10.el8.x86_64 boost-chrono 1.66.0-10.el8.x86_64 boost-date-time 1.66.0-10.el8.x86_64 boost-iostreams 1.66.0-10.el8.x86_64 boost-program-options 1.66.0-10.el8.x86_64 boost-random 1.66.0-10.el8.x86_64 boost-regex 1.66.0-10.el8.x86_64 boost-system 1.66.0-10.el8.x86_64 boost-thread 1.66.0-10.el8.x86_64 brotli 1.0.6-2.el8.x86_64 bzip2 1.0.6-26.el8.x86_64 bzip2-libs 1.0.6-26.el8.x86_64 c-ares 1.13.0-5.el8.x86_64 ca-certificates 2020.2.41-80.0.el8_2.noarch cairo 1.15.12-3.el8.x86_64 celt051 0.5.1.3-15.el8.x86_64 certmonger 0.79.7-15.el8.x86_64 checkpolicy 2.9-1.el8.x86_64 chkconfig 1.13-2.el8.x86_64 chrony 3.5-1.el8.x86_64 clevis 13-3.el8.x86_64 clevis-dracut 13-3.el8.x86_64 clevis-luks 13-3.el8.x86_64 clevis-systemd 13-3.el8.x86_64 cockpit 224.2-1.el8.x86_64 cockpit-bridge 224.2-1.el8.x86_64 cockpit-dashboard 224.2-1.el8.noarch cockpit-ovirt-dashboard 0.14.13-2.el8ev.noarch cockpit-storaged 224.2-1.el8.noarch cockpit-system 224.2-1.el8.noarch cockpit-ws 224.2-1.el8.x86_64 collectd 5.11.0-2.el8ost.x86_64 collectd-disk 5.11.0-2.el8ost.x86_64 collectd-netlink 5.11.0-2.el8ost.x86_64 collectd-virt 5.11.0-2.el8ost.x86_64 collectd-write_http 5.11.0-2.el8ost.x86_64 collectd-write_syslog 5.11.0-2.el8ost.x86_64 coreutils 8.30-8.el8.x86_64 coreutils-common 8.30-8.el8.x86_64 corosynclib 3.0.3-4.el8.x86_64 cpio 2.12-8.el8.x86_64 cracklib 2.9.6-15.el8.x86_64 cracklib-dicts 2.9.6-15.el8.x86_64 cronie 1.5.2-4.el8.x86_64 cronie-anacron 1.5.2-4.el8.x86_64 crontabs 1.11-16.20150630git.el8.noarch crypto-policies 20200713-1.git51d1222.el8.noarch crypto-policies-scripts 20200713-1.git51d1222.el8.noarch cryptsetup 2.3.3-2.el8.x86_64 cryptsetup-libs 2.3.3-2.el8.x86_64 cups-libs 2.2.6-38.el8.x86_64 curl 7.61.1-14.el8.x86_64 cyrus-sasl 2.1.27-5.el8.x86_64 cyrus-sasl-gssapi 2.1.27-5.el8.x86_64 cyrus-sasl-lib 2.1.27-5.el8.x86_64 cyrus-sasl-scram 2.1.27-5.el8.x86_64
daxctl-libs 67-2.el8.x86_64 dbus 1.12.8-11.el8.x86_64 dbus-common 1.12.8-11.el8.noarch dbus-daemon 1.12.8-11.el8.x86_64 dbus-glib 0.110-2.el8.x86_64 dbus-libs 1.12.8-11.el8.x86_64 dbus-tools 1.12.8-11.el8.x86_64 dbxtool 8-5.el8.x86_64 device-mapper 1.02.171-5.el8.x86_64 device-mapper-event 1.02.171-5.el8.x86_64 device-mapper-event-libs 1.02.171-5.el8.x86_64 device-mapper-libs 1.02.171-5.el8.x86_64 device-mapper-multipath 0.8.4-5.el8.x86_64 device-mapper-multipath-libs 0.8.4-5.el8.x86_64 device-mapper-persistent-data 0.8.5-4.el8.x86_64 dhcp-client 4.3.6-41.el8.x86_64 dhcp-common 4.3.6-41.el8.noarch dhcp-libs 4.3.6-41.el8.x86_64 diffutils 3.6-6.el8.x86_64 dmidecode 3.2-6.el8.x86_64 dnf 4.2.23-4.el8.noarch dnf-data 4.2.23-4.el8.noarch dnf-plugin-subscription-manager 1.27.16-1.el8.x86_64 dnf-plugins-core 4.0.17-5.el8.noarch dnsmasq 2.79-13.el8.x86_64 dosfstools 4.1-6.el8.x86_64 dracut 049-95.git20200804.el8.x86_64 dracut-config-generic 049-95.git20200804.el8.x86_64 dracut-network 049-95.git20200804.el8.x86_64 dracut-squash 049-95.git20200804.el8.x86_64 e2fsprogs 1.45.6-1.el8.x86_64 e2fsprogs-libs 1.45.6-1.el8.x86_64 edk2-ovmf 20200602gitca407c7246bf-3.el8.noarch efi-filesystem 3-2.el8.noarch efibootmgr 16-1.el8.x86_64 efivar 37-4.el8.x86_64 efivar-libs 37-4.el8.x86_64 elfutils 0.180-1.el8.x86_64 elfutils-default-yama-scope 0.180-1.el8.noarch elfutils-libelf 0.180-1.el8.x86_64 elfutils-libs 0.180-1.el8.x86_64 ethtool 5.0-2.el8.x86_64 expat 2.2.5-4.el8.x86_64 fcoe-utils 1.0.32-7.el8.x86_64 fence-agents-all 4.2.1-53.el8_3.1.x86_64 fence-agents-amt-ws 4.2.1-53.el8_3.1.noarch fence-agents-apc 4.2.1-53.el8_3.1.noarch fence-agents-apc-snmp 4.2.1-53.el8_3.1.noarch fence-agents-bladecenter 4.2.1-53.el8_3.1.noarch fence-agents-brocade 4.2.1-53.el8_3.1.noarch fence-agents-cisco-mds 4.2.1-53.el8_3.1.noarch fence-agents-cisco-ucs 4.2.1-53.el8_3.1.noarch fence-agents-common 4.2.1-53.el8_3.1.noarch fence-agents-compute 4.2.1-53.el8_3.1.noarch fence-agents-drac5 4.2.1-53.el8_3.1.noarch fence-agents-eaton-snmp 4.2.1-53.el8_3.1.noarch fence-agents-emerson 4.2.1-53.el8_3.1.noarch fence-agents-eps 4.2.1-53.el8_3.1.noarch fence-agents-heuristics-ping 4.2.1-53.el8_3.1.noarch fence-agents-hpblade 4.2.1-53.el8_3.1.noarch fence-agents-ibmblade 4.2.1-53.el8_3.1.noarch fence-agents-ifmib 4.2.1-53.el8_3.1.noarch fence-agents-ilo-moonshot 4.2.1-53.el8_3.1.noarch fence-agents-ilo-mp 4.2.1-53.el8_3.1.noarch fence-agents-ilo-ssh 4.2.1-53.el8_3.1.noarch fence-agents-ilo2 4.2.1-53.el8_3.1.noarch fence-agents-intelmodular 4.2.1-53.el8_3.1.noarch fence-agents-ipdu 4.2.1-53.el8_3.1.noarch fence-agents-ipmilan 4.2.1-53.el8_3.1.noarch fence-agents-kdump 4.2.1-53.el8_3.1.x86_64 fence-agents-mpath 4.2.1-53.el8_3.1.noarch fence-agents-redfish 4.2.1-53.el8_3.1.x86_64 fence-agents-rhevm 4.2.1-53.el8_3.1.noarch fence-agents-rsa 4.2.1-53.el8_3.1.noarch fence-agents-rsb 4.2.1-53.el8_3.1.noarch fence-agents-sbd 4.2.1-53.el8_3.1.noarch fence-agents-scsi 4.2.1-53.el8_3.1.noarch fence-agents-vmware-rest 4.2.1-53.el8_3.1.noarch fence-agents-vmware-soap 4.2.1-53.el8_3.1.noarch fence-agents-wti 4.2.1-53.el8_3.1.noarch fence-virt 1.0.0-1.el8.x86_64 file 5.33-16.el8.x86_64 file-libs 5.33-16.el8.x86_64 filesystem 3.8-3.el8.x86_64 findutils 4.6.0-20.el8.x86_64 firewalld 0.8.2-2.el8.noarch firewalld-filesystem 0.8.2-2.el8.noarch fontconfig 2.13.1-3.el8.x86_64 fontpackages-filesystem 1.44-22.el8.noarch freetype 2.9.1-4.el8_3.1.x86_64 fribidi 1.0.4-8.el8.x86_64 fuse 2.9.7-12.el8.x86_64 fuse-common 3.2.1-12.el8.x86_64 fuse-libs 2.9.7-12.el8.x86_64 gawk 
4.2.1-1.el8.x86_64 gc 7.6.4-3.el8.x86_64 gdb-headless 8.2-12.el8.x86_64 gdbm 1.18-1.el8.x86_64 gdbm-libs 1.18-1.el8.x86_64 gdisk 1.0.3-6.el8.x86_64 genisoimage 1.1.11-39.el8.x86_64 gettext 0.19.8.1-17.el8.x86_64 gettext-libs 0.19.8.1-17.el8.x86_64 glib-networking 2.56.1-1.1.el8.x86_64 glib2 2.56.4-8.el8.x86_64 glibc 2.28-127.el8.x86_64 glibc-common 2.28-127.el8.x86_64 glibc-langpack-en 2.28-127.el8.x86_64 gluster-ansible-cluster 1.0-3.el8rhgs.noarch gluster-ansible-features 1.0.5-10.el8rhgs.noarch gluster-ansible-infra 1.0.4-17.el8rhgs.noarch gluster-ansible-maintenance 1.0.1-11.el8rhgs.noarch gluster-ansible-repositories 1.0.1-4.el8rhgs.noarch gluster-ansible-roles 1.0.5-22.el8rhgs.noarch glusterfs 6.0-37.1.el8rhgs.x86_64 glusterfs-api 6.0-37.1.el8rhgs.x86_64 glusterfs-cli 6.0-37.1.el8rhgs.x86_64 glusterfs-client-xlators 6.0-37.1.el8rhgs.x86_64 glusterfs-events 6.0-37.1.el8rhgs.x86_64 glusterfs-fuse 6.0-37.1.el8rhgs.x86_64 glusterfs-geo-replication 6.0-37.1.el8rhgs.x86_64 glusterfs-libs 6.0-37.1.el8rhgs.x86_64 glusterfs-rdma 6.0-37.1.el8rhgs.x86_64 glusterfs-server 6.0-37.1.el8rhgs.x86_64 gmp 6.1.2-10.el8.x86_64 gnupg2 2.2.20-2.el8.x86_64 gnutls 3.6.14-6.el8.x86_64 gnutls-dane 3.6.14-6.el8.x86_64 gnutls-utils 3.6.14-6.el8.x86_64 gobject-introspection 1.56.1-1.el8.x86_64 gpgme 1.13.1-3.el8.x86_64 graphite2 1.3.10-10.el8.x86_64 grep 3.1-6.el8.x86_64 groff-base 1.22.3-18.el8.x86_64 grub2-common 2.02-90.el8.noarch grub2-efi-x64 2.02-90.el8.x86_64 grub2-pc 2.02-90.el8.x86_64 grub2-pc-modules 2.02-90.el8.noarch grub2-tools 2.02-90.el8.x86_64 grub2-tools-extra 2.02-90.el8.x86_64 grub2-tools-minimal 2.02-90.el8.x86_64 grubby 8.40-41.el8.x86_64 gsettings-desktop-schemas 3.32.0-5.el8.x86_64 gssproxy 0.8.0-16.el8.x86_64 gstreamer1 1.16.1-2.el8.x86_64 gstreamer1-plugins-base 1.16.1-1.el8.x86_64 guile 2.0.14-7.el8.x86_64 gzip 1.9-9.el8.x86_64 harfbuzz 1.7.5-3.el8.x86_64 hdparm 9.54-2.el8.x86_64 hexedit 1.2.13-12.el8.x86_64 hivex 1.3.18-20.module+el8.3.0+6124+819ee737.x86_64 hostname 3.20-6.el8.x86_64 hwdata 0.314-8.6.el8.noarch ima-evm-utils 1.1-5.el8.x86_64 imgbased 1.2.13-0.1.el8ev.noarch info 6.5-6.el8.x86_64 initscripts 10.00.9-1.el8.x86_64 insights-client 3.1.0-3.el8.noarch ioprocess 1.4.2-1.el8ev.x86_64 iotop 0.6-16.el8.noarch ipa-client 4.8.7-13.module+el8.3.0+8376+0bba7131.x86_64 ipa-client-common 4.8.7-13.module+el8.3.0+8376+0bba7131.noarch ipa-common 4.8.7-13.module+el8.3.0+8376+0bba7131.noarch ipa-selinux 4.8.7-13.module+el8.3.0+8376+0bba7131.noarch ipcalc 0.2.4-4.el8.x86_64 iperf3 3.5-6.el8.x86_64 ipmitool 1.8.18-17.el8.x86_64 iproute 5.3.0-5.el8.x86_64 iproute-tc 5.3.0-5.el8.x86_64 iprutils 2.4.19-1.el8.x86_64 ipset 7.1-1.el8.x86_64 ipset-libs 7.1-1.el8.x86_64 iptables 1.8.4-15.el8.x86_64 iptables-ebtables 1.8.4-15.el8.x86_64 iptables-libs 1.8.4-15.el8.x86_64 iputils 20180629-2.el8.x86_64 ipxe-roms-qemu 20181214-6.git133f4c47.el8.noarch irqbalance 1.4.0-4.el8.x86_64 iscsi-initiator-utils 6.2.0.878-5.gitd791ce0.el8.x86_64 iscsi-initiator-utils-iscsiuio 6.2.0.878-5.gitd791ce0.el8.x86_64 isns-utils-libs 0.99-1.el8.x86_64 iso-codes 3.79-2.el8.noarch iwl100-firmware 39.31.5.1-99.el8.1.noarch iwl1000-firmware 39.31.5.1-99.el8.1.noarch iwl105-firmware 18.168.6.1-99.el8.1.noarch iwl135-firmware 18.168.6.1-99.el8.1.noarch iwl2000-firmware 18.168.6.1-99.el8.1.noarch iwl2030-firmware 18.168.6.1-99.el8.1.noarch iwl3160-firmware 25.30.13.0-99.el8.1.noarch iwl5000-firmware 8.83.5.1_1-99.el8.1.noarch iwl5150-firmware 8.24.2.2-99.el8.1.noarch iwl6000-firmware 9.221.4.1-99.el8.1.noarch iwl6000g2a-firmware 
18.168.6.1-99.el8.1.noarch iwl6050-firmware 41.28.5.1-99.el8.1.noarch iwl7260-firmware 25.30.13.0-99.el8.1.noarch jansson 2.11-3.el8.x86_64 jose 10-2.el8.x86_64 jq 1.5-12.el8.x86_64 json-c 0.13.1-0.2.el8.x86_64 json-glib 1.4.4-1.el8.x86_64 kbd 2.0.4-10.el8.x86_64 kbd-legacy 2.0.4-10.el8.noarch kbd-misc 2.0.4-10.el8.noarch kernel 4.18.0-240.1.1.el8_3.x86_64 kernel-core 4.18.0-240.1.1.el8_3.x86_64 kernel-modules 4.18.0-240.1.1.el8_3.x86_64 kernel-tools 4.18.0-240.1.1.el8_3.x86_64 kernel-tools-libs 4.18.0-240.1.1.el8_3.x86_64 kexec-tools 2.0.20-34.el8.x86_64 keyutils 1.5.10-6.el8.x86_64 keyutils-libs 1.5.10-6.el8.x86_64 kmod 25-16.el8.x86_64 kmod-kvdo 6.2.3.114-74.el8.x86_64 kmod-libs 25-16.el8.x86_64 kpartx 0.8.4-5.el8.x86_64 krb5-libs 1.18.2-5.el8.x86_64 krb5-workstation 1.18.2-5.el8.x86_64 langpacks-en 1.0-12.el8.noarch less 530-1.el8.x86_64 libX11 1.6.8-3.el8.x86_64 libX11-common 1.6.8-3.el8.noarch libX11-xcb 1.6.8-3.el8.x86_64 libXau 1.0.9-3.el8.x86_64 libXdamage 1.1.4-14.el8.x86_64 libXext 1.3.4-1.el8.x86_64 libXfixes 5.0.3-7.el8.x86_64 libXft 2.3.3-1.el8.x86_64 libXrender 0.9.10-7.el8.x86_64 libXv 1.0.11-7.el8.x86_64 libXxf86vm 1.1.4-9.el8.x86_64 libacl 2.2.53-1.el8.x86_64 libaio 0.3.112-1.el8.x86_64 libarchive 3.3.2-9.el8.x86_64 libassuan 2.5.1-3.el8.x86_64 libatasmart 0.19-14.el8.x86_64 libatomic_ops 7.6.2-3.el8.x86_64 libattr 2.4.48-3.el8.x86_64 libbabeltrace 1.5.4-3.el8.x86_64 libbasicobjects 0.1.1-39.el8.x86_64 libblkid 2.32.1-24.el8.x86_64 libblockdev 2.24-1.el8.x86_64 libblockdev-crypto 2.24-1.el8.x86_64 libblockdev-dm 2.24-1.el8.x86_64 libblockdev-fs 2.24-1.el8.x86_64 libblockdev-kbd 2.24-1.el8.x86_64 libblockdev-loop 2.24-1.el8.x86_64 libblockdev-lvm 2.24-1.el8.x86_64 libblockdev-mdraid 2.24-1.el8.x86_64 libblockdev-mpath 2.24-1.el8.x86_64 libblockdev-nvdimm 2.24-1.el8.x86_64 libblockdev-part 2.24-1.el8.x86_64 libblockdev-plugins-all 2.24-1.el8.x86_64 libblockdev-swap 2.24-1.el8.x86_64 libblockdev-utils 2.24-1.el8.x86_64 libblockdev-vdo 2.24-1.el8.x86_64 libbytesize 1.4-3.el8.x86_64 libcacard 2.7.0-2.el8_1.x86_64 libcap 2.26-4.el8.x86_64 libcap-ng 0.7.9-5.el8.x86_64 libcollection 0.7.0-39.el8.x86_64 libcom_err 1.45.6-1.el8.x86_64 libcomps 0.1.11-4.el8.x86_64 libconfig 1.5-9.el8.x86_64 libcroco 0.6.12-4.el8_2.1.x86_64 libcurl 7.61.1-14.el8.x86_64 libdaemon 0.14-15.el8.x86_64 libdatrie 0.2.9-7.el8.x86_64 libdb 5.3.28-39.el8.x86_64 libdb-utils 5.3.28-39.el8.x86_64 libdhash 0.5.0-39.el8.x86_64 libdnf 0.48.0-5.el8.x86_64 libdrm 2.4.101-1.el8.x86_64 libedit 3.1-23.20170329cvs.el8.x86_64 libepoxy 1.5.3-1.el8.x86_64 libestr 0.1.10-1.el8.x86_64 libevent 2.1.8-5.el8.x86_64 libfastjson 0.99.8-2.el8.x86_64 libfdisk 2.32.1-24.el8.x86_64 libffi 3.1-22.el8.x86_64 libgcc 8.3.1-5.1.el8.x86_64 libgcrypt 1.8.5-4.el8.x86_64 libglvnd 1.2.0-6.el8.x86_64 libglvnd-egl 1.2.0-6.el8.x86_64 libglvnd-gles 1.2.0-6.el8.x86_64 libglvnd-glx 1.2.0-6.el8.x86_64 libgomp 8.3.1-5.1.el8.x86_64 libgpg-error 1.31-1.el8.x86_64 libgudev 232-4.el8.x86_64 libguestfs 1.42.0-2.module+el8.3.0+6798+ad6e66be.x86_64 libguestfs-tools-c 1.42.0-2.module+el8.3.0+6798+ad6e66be.x86_64 libguestfs-winsupport 8.2-1.module+el8.3.0+6124+819ee737.x86_64 libibumad 29.0-3.el8.x86_64 libibverbs 29.0-3.el8.x86_64 libicu 60.3-2.el8_1.x86_64 libidn2 2.2.0-1.el8.x86_64 libini_config 1.3.1-39.el8.x86_64 libipa_hbac 2.3.0-9.el8.x86_64 libipt 1.6.1-8.el8.x86_64 libiscsi 1.18.0-8.module+el8.3.0+6124+819ee737.x86_64 libjose 10-2.el8.x86_64 libjpeg-turbo 1.5.3-10.el8.x86_64 libkadm5 1.18.2-5.el8.x86_64 libkcapi 1.2.0-2.el8.x86_64 libkcapi-hmaccalc 
1.2.0-2.el8.x86_64 libksba 1.3.5-7.el8.x86_64 libldb 2.1.3-2.el8.x86_64 liblognorm 2.0.5-1.el8.x86_64 libluksmeta 9-4.el8.x86_64 libmaxminddb 1.2.0-10.el8.x86_64 libmetalink 0.1.3-7.el8.x86_64 libmnl 1.0.4-6.el8.x86_64 libmodman 2.0.1-17.el8.x86_64 libmodulemd 2.9.4-2.el8.x86_64 libmount 2.32.1-24.el8.x86_64 libndp 1.7-3.el8.x86_64 libnetfilter_conntrack 1.0.6-5.el8.x86_64 libnfnetlink 1.0.1-13.el8.x86_64 libnfsidmap 2.3.3-35.el8.x86_64 libnftnl 1.1.5-4.el8.x86_64 libnghttp2 1.33.0-3.el8_2.1.x86_64 libnl3 3.5.0-1.el8.x86_64 libnl3-cli 3.5.0-1.el8.x86_64 libnsl2 1.2.0-2.20180605git4a062cf.el8.x86_64 libogg 1.3.2-10.el8.x86_64 libosinfo 1.8.0-1.el8.x86_64 libpath_utils 0.2.1-39.el8.x86_64 libpcap 1.9.1-4.el8.x86_64 libpciaccess 0.14-1.el8.x86_64 libpipeline 1.5.0-2.el8.x86_64 libpkgconf 1.4.2-1.el8.x86_64 libpmem 1.6.1-1.el8.x86_64 libpng 1.6.34-5.el8.x86_64 libproxy 0.4.15-5.2.el8.x86_64 libpsl 0.20.2-6.el8.x86_64 libpwquality 1.4.0-9.el8.x86_64 libqb 1.0.3-12.el8.x86_64 librados2 12.2.7-9.el8.x86_64 librbd1 12.2.7-9.el8.x86_64 librdmacm 29.0-3.el8.x86_64 libref_array 0.1.5-39.el8.x86_64 librepo 1.12.0-2.el8.x86_64 libreport 2.9.5-15.el8.x86_64 libreport-cli 2.9.5-15.el8.x86_64 libreport-filesystem 2.9.5-15.el8.x86_64 libreport-plugin-rhtsupport 2.9.5-15.el8.x86_64 libreport-plugin-ureport 2.9.5-15.el8.x86_64 libreport-rhel 2.9.5-15.el8.x86_64 libreport-web 2.9.5-15.el8.x86_64 librhsm 0.0.3-3.el8.x86_64 libseccomp 2.4.3-1.el8.x86_64 libselinux 2.9-4.el8_3.x86_64 libselinux-utils 2.9-4.el8_3.x86_64 libsemanage 2.9-3.el8.x86_64 libsepol 2.9-1.el8.x86_64 libsigsegv 2.11-5.el8.x86_64 libsmartcols 2.32.1-24.el8.x86_64 libsolv 0.7.11-1.el8.x86_64 libsoup 2.62.3-2.el8.x86_64 libss 1.45.6-1.el8.x86_64 libssh 0.9.4-2.el8.x86_64 libssh-config 0.9.4-2.el8.noarch libsss_autofs 2.3.0-9.el8.x86_64 libsss_certmap 2.3.0-9.el8.x86_64 libsss_idmap 2.3.0-9.el8.x86_64 libsss_nss_idmap 2.3.0-9.el8.x86_64 libsss_simpleifp 2.3.0-9.el8.x86_64 libstdc++ 8.3.1-5.1.el8.x86_64 libsysfs 2.1.0-24.el8.x86_64 libtalloc 2.3.1-3.el8rhgs.x86_64 libtar 1.2.20-15.el8.x86_64 libtasn1 4.13-3.el8.x86_64 libtdb 1.4.3-2.el8rhgs.x86_64 libteam 1.31-2.el8.x86_64 libtevent 0.10.2-3.el8rhgs.x86_64 libthai 0.1.27-2.el8.x86_64 libtheora 1.1.1-21.el8.x86_64 libtirpc 1.1.4-4.el8.x86_64 libtool-ltdl 2.4.6-25.el8.x86_64 libtpms 0.7.3-1.20200818git1d392d466a.module+el8.3.0+8092+f9e72d7e.x86_64 libudisks2 2.9.0-3.el8.x86_64 libunistring 0.9.9-3.el8.x86_64 libusal 1.1.11-39.el8.x86_64 libusbx 1.0.23-4.el8.x86_64 libuser 0.62-23.el8.x86_64 libutempter 1.1.6-14.el8.x86_64 libuuid 2.32.1-24.el8.x86_64 libverto 0.3.0-5.el8.x86_64 libverto-libevent 0.3.0-5.el8.x86_64 libvirt 6.6.0-7.module+el8.3.0+8424+5ea525c5.x86_64 libvirt-admin 6.6.0-7.module+el8.3.0+8424+5ea525c5.x86_64 libvirt-bash-completion 6.6.0-7.module+el8.3.0+8424+5ea525c5.x86_64 libvirt-client 6.6.0-7.module+el8.3.0+8424+5ea525c5.x86_64 libvirt-daemon 6.6.0-7.module+el8.3.0+8424+5ea525c5.x86_64 libvirt-daemon-config-network 6.6.0-7.module+el8.3.0+8424+5ea525c5.x86_64 libvirt-daemon-config-nwfilter 6.6.0-7.module+el8.3.0+8424+5ea525c5.x86_64 libvirt-daemon-driver-interface 6.6.0-7.module+el8.3.0+8424+5ea525c5.x86_64 libvirt-daemon-driver-network 6.6.0-7.module+el8.3.0+8424+5ea525c5.x86_64 libvirt-daemon-driver-nodedev 6.6.0-7.module+el8.3.0+8424+5ea525c5.x86_64 libvirt-daemon-driver-nwfilter 6.6.0-7.module+el8.3.0+8424+5ea525c5.x86_64 libvirt-daemon-driver-qemu 6.6.0-7.module+el8.3.0+8424+5ea525c5.x86_64 libvirt-daemon-driver-secret 6.6.0-7.module+el8.3.0+8424+5ea525c5.x86_64 
libvirt-daemon-driver-storage 6.6.0-7.module+el8.3.0+8424+5ea525c5.x86_64 libvirt-daemon-driver-storage-core 6.6.0-7.module+el8.3.0+8424+5ea525c5.x86_64 libvirt-daemon-driver-storage-disk 6.6.0-7.module+el8.3.0+8424+5ea525c5.x86_64 libvirt-daemon-driver-storage-gluster 6.6.0-7.module+el8.3.0+8424+5ea525c5.x86_64 libvirt-daemon-driver-storage-iscsi 6.6.0-7.module+el8.3.0+8424+5ea525c5.x86_64 libvirt-daemon-driver-storage-iscsi-direct 6.6.0-7.module+el8.3.0+8424+5ea525c5.x86_64 libvirt-daemon-driver-storage-logical 6.6.0-7.module+el8.3.0+8424+5ea525c5.x86_64 libvirt-daemon-driver-storage-mpath 6.6.0-7.module+el8.3.0+8424+5ea525c5.x86_64 libvirt-daemon-driver-storage-rbd 6.6.0-7.module+el8.3.0+8424+5ea525c5.x86_64 libvirt-daemon-driver-storage-scsi 6.6.0-7.module+el8.3.0+8424+5ea525c5.x86_64 libvirt-daemon-kvm 6.6.0-7.module+el8.3.0+8424+5ea525c5.x86_64 libvirt-libs 6.6.0-7.module+el8.3.0+8424+5ea525c5.x86_64 libvirt-lock-sanlock 6.6.0-7.module+el8.3.0+8424+5ea525c5.x86_64 libvisual 0.4.0-24.el8.x86_64 libvorbis 1.3.6-2.el8.x86_64 libwayland-client 1.17.0-1.el8.x86_64 libwayland-cursor 1.17.0-1.el8.x86_64 libwayland-egl 1.17.0-1.el8.x86_64 libwayland-server 1.17.0-1.el8.x86_64 libwbclient 4.12.3-12.el8.3.x86_64 libwsman1 2.6.5-7.el8.x86_64 libxcb 1.13.1-1.el8.x86_64 libxcrypt 4.1.1-4.el8.x86_64 libxkbcommon 0.9.1-1.el8.x86_64 libxml2 2.9.7-8.el8.x86_64 libxshmfence 1.3-2.el8.x86_64 libxslt 1.1.32-5.el8.x86_64 libyaml 0.1.7-5.el8.x86_64 libzstd 1.4.4-1.el8.x86_64 linux-firmware 20200619-99.git3890db36.el8.noarch lksctp-tools 1.0.18-3.el8.x86_64 lldpad 1.0.1-13.git036e314.el8.x86_64 llvm-libs 10.0.1-3.module+el8.3.0+7719+53d428de.x86_64 lm_sensors-libs 3.4.0-21.20180522git70f7e08.el8.x86_64 logrotate 3.14.0-4.el8.x86_64 lshw B.02.19.2-2.el8.x86_64 lsof 4.93.2-1.el8.x86_64 lsscsi 0.30-1.el8.x86_64 lua-libs 5.3.4-11.el8.x86_64 luksmeta 9-4.el8.x86_64 lvm2 2.03.09-5.el8.x86_64 lvm2-libs 2.03.09-5.el8.x86_64 lz4 1.8.3-2.el8.x86_64 lz4-libs 1.8.3-2.el8.x86_64 lzo 2.08-14.el8.x86_64 lzop 1.03-20.el8.x86_64 mailx 12.5-29.el8.x86_64 man-db 2.7.6.1-17.el8.x86_64 mariadb-connector-c 3.0.7-1.el8.x86_64 mariadb-connector-c-config 3.0.7-1.el8.noarch mdadm 4.1-14.el8.x86_64 mdevctl 0.61-3.el8.noarch memtest86+ 5.01-19.el8.x86_64 mesa-dri-drivers 20.1.4-1.el8.x86_64 mesa-filesystem 20.1.4-1.el8.x86_64 mesa-libEGL 20.1.4-1.el8.x86_64 mesa-libGL 20.1.4-1.el8.x86_64 mesa-libgbm 20.1.4-1.el8.x86_64 mesa-libglapi 20.1.4-1.el8.x86_64 microcode_ctl 20200609-2.20201027.1.el8_3.x86_64 mokutil 0.3.0-10.el8.x86_64 mom 0.6.0-1.el8ev.noarch mozjs60 60.9.0-4.el8.x86_64 mpfr 3.1.6-1.el8.x86_64 mtools 4.0.18-14.el8.x86_64 nbdkit 1.22.0-2.module+el8.3.0+8203+18ecf00e.x86_64 nbdkit-basic-filters 1.22.0-2.module+el8.3.0+8203+18ecf00e.x86_64 nbdkit-basic-plugins 1.22.0-2.module+el8.3.0+8203+18ecf00e.x86_64 nbdkit-curl-plugin 1.22.0-2.module+el8.3.0+8203+18ecf00e.x86_64 nbdkit-python-plugin 1.22.0-2.module+el8.3.0+8203+18ecf00e.x86_64 nbdkit-server 1.22.0-2.module+el8.3.0+8203+18ecf00e.x86_64 nbdkit-ssh-plugin 1.22.0-2.module+el8.3.0+8203+18ecf00e.x86_64 nbdkit-vddk-plugin 1.22.0-2.module+el8.3.0+8203+18ecf00e.x86_64 ncurses 6.1-7.20180224.el8.x86_64 ncurses-base 6.1-7.20180224.el8.noarch ncurses-libs 6.1-7.20180224.el8.x86_64 ndctl 67-2.el8.x86_64 ndctl-libs 67-2.el8.x86_64 net-snmp 5.8-17.el8.x86_64 net-snmp-agent-libs 5.8-17.el8.x86_64 net-snmp-libs 5.8-17.el8.x86_64 net-snmp-utils 5.8-17.el8.x86_64 netcf-libs 0.2.8-12.module+el8.3.0+6124+819ee737.x86_64 nettle 3.4.1-2.el8.x86_64 network-scripts 10.00.9-1.el8.x86_64 newt 
0.52.20-11.el8.x86_64 nfs-utils 2.3.3-35.el8.x86_64 nftables 0.9.3-16.el8.x86_64 nmap-ncat 7.70-5.el8.x86_64 nmstate 0.3.4-13.el8_3.noarch npth 1.5-4.el8.x86_64 nspr 4.25.0-2.el8_2.x86_64 nss 3.53.1-11.el8_2.x86_64 nss-softokn 3.53.1-11.el8_2.x86_64 nss-softokn-freebl 3.53.1-11.el8_2.x86_64 nss-sysinit 3.53.1-11.el8_2.x86_64 nss-tools 3.53.1-11.el8_2.x86_64 nss-util 3.53.1-11.el8_2.x86_64 numactl 2.0.12-11.el8.x86_64 numactl-libs 2.0.12-11.el8.x86_64 numad 0.5-26.20150602git.el8.x86_64 oddjob 0.34.5-3.el8.x86_64 oddjob-mkhomedir 0.34.5-3.el8.x86_64 oniguruma 6.8.2-2.el8.x86_64 openldap 2.4.46-15.el8.x86_64 opensc 0.20.0-2.el8.x86_64 openscap 1.3.3-5.el8.x86_64 openscap-scanner 1.3.3-5.el8.x86_64 openssh 8.0p1-5.el8.x86_64 openssh-clients 8.0p1-5.el8.x86_64 openssh-server 8.0p1-5.el8.x86_64 openssl 1.1.1g-11.el8.x86_64 openssl-libs 1.1.1g-11.el8.x86_64 openvswitch-selinux-extra-policy 1.0-22.el8fdp.noarch openvswitch2.11 2.11.3-68.el8fdp.x86_64 openwsman-python3 2.6.5-7.el8.x86_64 opus 1.3-0.4.beta.el8.x86_64 orc 0.4.28-3.el8.x86_64 os-prober 1.74-6.el8.x86_64 osinfo-db 20200813-1.el8.noarch osinfo-db-tools 1.8.0-1.el8.x86_64 otopi-common 1.9.2-1.el8ev.noarch ovirt-ansible-collection 1.2.2-1.el8ev.noarch ovirt-host 4.4.1-4.el8ev.x86_64 ovirt-host-dependencies 4.4.1-4.el8ev.x86_64 ovirt-hosted-engine-ha 2.4.5-1.el8ev.noarch ovirt-hosted-engine-setup 2.4.8-1.el8ev.noarch ovirt-imageio-client 2.1.1-1.el8ev.x86_64 ovirt-imageio-common 2.1.1-1.el8ev.x86_64 ovirt-imageio-daemon 2.1.1-1.el8ev.x86_64 ovirt-node-ng-nodectl 4.4.0-1.el8ev.noarch ovirt-provider-ovn-driver 1.2.32-1.el8ev.noarch ovirt-vmconsole 1.0.8-1.el8ev.noarch ovirt-vmconsole-host 1.0.8-1.el8ev.noarch ovn2.11 2.11.1-54.el8fdp.x86_64 ovn2.11-host 2.11.1-54.el8fdp.x86_64 p11-kit 0.23.14-5.el8_0.x86_64 p11-kit-trust 0.23.14-5.el8_0.x86_64 pacemaker-cluster-libs 2.0.4-6.el8.x86_64 pacemaker-libs 2.0.4-6.el8.x86_64 pacemaker-schemas 2.0.4-6.el8.noarch pam 1.3.1-11.el8.x86_64 pango 1.42.4-6.el8.x86_64 parted 3.2-38.el8.x86_64 passwd 0.80-3.el8.x86_64 pciutils 3.6.4-2.el8.x86_64 pciutils-libs 3.6.4-2.el8.x86_64 pcre 8.42-4.el8.x86_64 pcre2 10.32-2.el8.x86_64 pcsc-lite 1.8.23-3.el8.x86_64 pcsc-lite-ccid 1.4.29-4.el8.x86_64 pcsc-lite-libs 1.8.23-3.el8.x86_64 perl-Carp 1.42-396.el8.noarch perl-Data-Dumper 2.167-399.el8.x86_64 perl-Errno 1.28-416.el8.x86_64 perl-Exporter 5.72-396.el8.noarch perl-File-Path 2.15-2.el8.noarch perl-IO 1.38-416.el8.x86_64 perl-PathTools 3.74-1.el8.x86_64 perl-Scalar-List-Utils 1.49-2.el8.x86_64 perl-Socket 2.027-3.el8.x86_64 perl-Text-Tabs+Wrap 2013.0523-395.el8.noarch perl-Unicode-Normalize 1.25-396.el8.x86_64 perl-constant 1.33-396.el8.noarch perl-interpreter 5.26.3-416.el8.x86_64 perl-libs 5.26.3-416.el8.x86_64 perl-macros 5.26.3-416.el8.x86_64 perl-parent 0.237-1.el8.noarch perl-threads 2.21-2.el8.x86_64 perl-threads-shared 1.58-2.el8.x86_64 pixman 0.38.4-1.el8.x86_64 pkgconf 1.4.2-1.el8.x86_64 pkgconf-m4 1.4.2-1.el8.noarch pkgconf-pkg-config 1.4.2-1.el8.x86_64 platform-python 3.6.8-31.el8.x86_64 platform-python-pip 9.0.3-18.el8.noarch platform-python-setuptools 39.2.0-6.el8.noarch policycoreutils 2.9-9.el8.x86_64 policycoreutils-python-utils 2.9-9.el8.noarch polkit 0.115-11.el8.x86_64 polkit-libs 0.115-11.el8.x86_64 polkit-pkla-compat 0.1-12.el8.x86_64 popt 1.16-14.el8.x86_64 postfix 3.3.1-12.el8.x86_64 prefixdevname 0.1.0-6.el8.x86_64 procps-ng 3.3.15-3.el8.x86_64 psmisc 23.1-5.el8.x86_64 publicsuffix-list-dafsa 20180723-1.el8.noarch python3-abrt 2.10.9-20.el8.x86_64 python3-abrt-addon 2.10.9-20.el8.x86_64 
python3-argcomplete 1.9.3-6.el8.noarch python3-asn1crypto 0.24.0-3.el8.noarch python3-audit 3.0-0.17.20191104git1c2f876.el8.x86_64 python3-augeas 0.5.0-12.el8.noarch python3-babel 2.5.1-5.el8.noarch python3-bind 9.11.20-5.el8.noarch python3-blivet 3.2.2-6.el8.noarch python3-blockdev 2.24-1.el8.x86_64 python3-bytesize 1.4-3.el8.x86_64 python3-cffi 1.11.5-5.el8.x86_64 python3-chardet 3.0.4-7.el8.noarch python3-configobj 5.0.6-11.el8.noarch python3-cryptography 2.3-3.el8.x86_64 python3-daemon 2.1.2-9.el8ar.noarch python3-dateutil 2.6.1-6.el8.noarch python3-dbus 1.2.4-15.el8.x86_64 python3-decorator 4.2.1-2.el8.noarch python3-dmidecode 3.12.2-15.el8.x86_64 python3-dnf 4.2.23-4.el8.noarch python3-dnf-plugin-versionlock 4.0.17-5.el8.noarch python3-dnf-plugins-core 4.0.17-5.el8.noarch python3-dns 1.15.0-10.el8.noarch python3-docutils 0.14-12.module+el8.1.0+3334+5cb623d7.noarch python3-ethtool 0.14-3.el8.x86_64 python3-firewall 0.8.2-2.el8.noarch python3-gluster 6.0-37.1.el8rhgs.x86_64 python3-gobject-base 3.28.3-2.el8.x86_64 python3-gpg 1.13.1-3.el8.x86_64 python3-gssapi 1.5.1-5.el8.x86_64 python3-hawkey 0.48.0-5.el8.x86_64 python3-idna 2.5-5.el8.noarch python3-imgbased 1.2.13-0.1.el8ev.noarch python3-iniparse 0.4-31.el8.noarch python3-inotify 0.9.6-13.el8.noarch python3-ioprocess 1.4.2-1.el8ev.x86_64 python3-ipaclient 4.8.7-13.module+el8.3.0+8376+0bba7131.noarch python3-ipalib 4.8.7-13.module+el8.3.0+8376+0bba7131.noarch python3-jinja2 2.10.1-2.el8_0.noarch python3-jmespath 0.9.0-11.el8.noarch python3-jsonschema 2.6.0-4.el8.noarch python3-jwcrypto 0.5.0-1.module+el8.1.0+4098+f286395e.noarch python3-ldap 3.1.0-5.el8.x86_64 python3-libcomps 0.1.11-4.el8.x86_64 python3-libdnf 0.48.0-5.el8.x86_64 python3-libipa_hbac 2.3.0-9.el8.x86_64 python3-libnmstate 0.3.4-13.el8_3.noarch python3-librepo 1.12.0-2.el8.x86_64 python3-libreport 2.9.5-15.el8.x86_64 python3-libs 3.6.8-31.el8.x86_64 python3-libselinux 2.9-4.el8_3.x86_64 python3-libsemanage 2.9-3.el8.x86_64 python3-libvirt 6.6.0-1.module+el8.3.0+7572+bcbf6b90.x86_64 python3-libxml2 2.9.7-8.el8.x86_64 python3-linux-procfs 0.6.2-2.el8.noarch python3-lockfile 0.11.0-8.el8ar.noarch python3-lxml 4.2.3-1.el8.x86_64 python3-magic 5.33-16.el8.noarch python3-markupsafe 0.23-19.el8.x86_64 python3-netaddr 0.7.19-8.1.el8ost.noarch python3-netifaces 0.10.6-4.el8.x86_64 python3-nftables 0.9.3-16.el8.x86_64 python3-openvswitch2.11 2.11.3-68.el8fdp.x86_64 python3-otopi 1.9.2-1.el8ev.noarch python3-ovirt-engine-sdk4 4.4.7-1.el8ev.x86_64 python3-ovirt-node-ng-nodectl 4.4.0-1.el8ev.noarch python3-ovirt-setup-lib 1.3.2-1.el8ev.noarch python3-passlib 1.7.0-5.el8ost.noarch python3-perf 4.18.0-240.1.1.el8_3.x86_64 python3-pexpect 4.6-2.el8ost.noarch python3-pip 9.0.3-18.el8.noarch python3-pip-wheel 9.0.3-18.el8.noarch python3-ply 3.9-8.el8.noarch python3-policycoreutils 2.9-9.el8.noarch python3-prettytable 0.7.2-14.el8.noarch python3-ptyprocess 0.5.2-4.el8.noarch python3-pyasn1 0.3.7-6.el8.noarch python3-pyasn1-modules 0.3.7-6.el8.noarch python3-pycparser 2.14-14.el8.noarch python3-pycurl 7.43.0.2-4.el8.x86_64 python3-pyparted 3.11.0-13.el8.x86_64 python3-pysocks 1.6.8-3.el8.noarch python3-pytz 2017.2-9.el8.noarch python3-pyudev 0.21.0-7.el8.noarch python3-pyusb 1.0.0-9.module+el8.1.0+4098+f286395e.noarch python3-pyxattr 0.5.3-19.el8ost.x86_64 python3-pyyaml 3.12-12.el8.x86_64 python3-qrcode-core 5.1-12.module+el8.1.0+4098+f286395e.noarch python3-requests 2.20.0-2.1.el8_1.noarch python3-rpm 4.14.3-4.el8.x86_64 python3-sanlock 3.8.2-1.el8.x86_64 python3-schedutils 
0.6-6.el8.x86_64 python3-setools 4.3.0-2.el8.x86_64 python3-setuptools 39.2.0-6.el8.noarch python3-setuptools-wheel 39.2.0-6.el8.noarch python3-six 1.12.0-1.el8ost.noarch python3-slip 0.6.4-11.el8.noarch python3-slip-dbus 0.6.4-11.el8.noarch python3-sss 2.3.0-9.el8.x86_64 python3-sss-murmur 2.3.0-9.el8.x86_64 python3-sssdconfig 2.3.0-9.el8.noarch python3-subscription-manager-rhsm 1.27.16-1.el8.x86_64 python3-suds 0.7-0.8.94664ddd46a6.el8.noarch python3-syspurpose 1.27.16-1.el8.x86_64 python3-systemd 234-8.el8.x86_64 python3-urllib3 1.24.2-4.el8.noarch python3-yubico 1.3.2-9.module+el8.1.0+4098+f286395e.noarch python36 3.6.8-2.module+el8.1.0+3334+5cb623d7.x86_64 qemu-img 5.1.0-14.module+el8.3.0+8438+644aff69.x86_64 qemu-kvm 5.1.0-14.module+el8.3.0+8438+644aff69.x86_64 qemu-kvm-block-curl 5.1.0-14.module+el8.3.0+8438+644aff69.x86_64 qemu-kvm-block-gluster 5.1.0-14.module+el8.3.0+8438+644aff69.x86_64 qemu-kvm-block-iscsi 5.1.0-14.module+el8.3.0+8438+644aff69.x86_64 qemu-kvm-block-rbd 5.1.0-14.module+el8.3.0+8438+644aff69.x86_64 qemu-kvm-block-ssh 5.1.0-14.module+el8.3.0+8438+644aff69.x86_64 qemu-kvm-common 5.1.0-14.module+el8.3.0+8438+644aff69.x86_64 qemu-kvm-core 5.1.0-14.module+el8.3.0+8438+644aff69.x86_64 quota 4.04-10.el8.x86_64 quota-nls 4.04-10.el8.noarch radvd 2.17-15.el8.x86_64 rdma-core 29.0-3.el8.x86_64 readline 7.0-10.el8.x86_64 redhat-release-virtualization-host 4.4.3-1.el8ev.x86_64 redhat-release-virtualization-host-content 4.4.3-1.el8ev.x86_64 redhat-virtualization-host-image-update-placeholder 4.4.3-1.el8ev.noarch rhsm-icons 1.27.16-1.el8.noarch rhv-openvswitch 2.11-7.el8ev.noarch rhv-openvswitch-ovn-common 2.11-7.el8ev.noarch rhv-openvswitch-ovn-host 2.11-7.el8ev.noarch rhv-python-openvswitch 2.11-7.el8ev.noarch rng-tools 6.8-3.el8.x86_64 rootfiles 8.1-22.el8.noarch rpcbind 1.2.5-7.el8.x86_64 rpm 4.14.3-4.el8.x86_64 rpm-build-libs 4.14.3-4.el8.x86_64 rpm-libs 4.14.3-4.el8.x86_64 rpm-plugin-selinux 4.14.3-4.el8.x86_64 rsync 3.1.3-9.el8.x86_64 rsyslog 8.1911.0-6.el8.x86_64 rsyslog-elasticsearch 8.1911.0-6.el8.x86_64 rsyslog-mmjsonparse 8.1911.0-6.el8.x86_64 rsyslog-mmnormalize 8.1911.0-6.el8.x86_64 safelease 1.0.1-1.el8ev.x86_64 samba-client-libs 4.12.3-12.el8.3.x86_64 samba-common 4.12.3-12.el8.3.noarch samba-common-libs 4.12.3-12.el8.3.x86_64 sanlock 3.8.2-1.el8.x86_64 sanlock-lib 3.8.2-1.el8.x86_64 satyr 0.26-2.el8.x86_64 sbd 1.4.1-7.el8.x86_64 scap-security-guide 0.1.48-1.el8ev.noarch scap-security-guide-rhv 0.1.48-1.el8ev.noarch scrub 2.5.2-14.el8.x86_64 seabios-bin 1.14.0-1.module+el8.3.0+7638+07cf13d2.noarch seavgabios-bin 1.14.0-1.module+el8.3.0+7638+07cf13d2.noarch sed 4.5-2.el8.x86_64 selinux-policy 3.14.3-54.el8.noarch selinux-policy-targeted 3.14.3-54.el8.noarch setup 2.12.2-6.el8.noarch sg3_utils 1.44-5.el8.x86_64 sg3_utils-libs 1.44-5.el8.x86_64 sgabios-bin 0.20170427git-3.module+el8.3.0+6124+819ee737.noarch shadow-utils 4.6-11.el8.x86_64 shim-x64 15-16.el8.x86_64 slang 2.3.2-3.el8.x86_64 snappy 1.1.8-3.el8.x86_64 socat 1.7.3.3-2.el8.x86_64 sos 3.9.1-6.el8.noarch spice-server 0.14.3-3.el8.x86_64 sqlite-libs 3.26.0-11.el8.x86_64 squashfs-tools 4.3-19.el8.x86_64 sscg 2.3.3-14.el8.x86_64 sshpass 1.06-3.el8ae.x86_64 sssd-client 2.3.0-9.el8.x86_64 sssd-common 2.3.0-9.el8.x86_64 sssd-common-pac 2.3.0-9.el8.x86_64 sssd-dbus 2.3.0-9.el8.x86_64 sssd-ipa 2.3.0-9.el8.x86_64 sssd-kcm 2.3.0-9.el8.x86_64 sssd-krb5-common 2.3.0-9.el8.x86_64 sssd-tools 2.3.0-9.el8.x86_64 subscription-manager 1.27.16-1.el8.x86_64 subscription-manager-cockpit 1.27.16-1.el8.noarch 
subscription-manager-rhsm-certificates 1.27.16-1.el8.x86_64 sudo 1.8.29-6.el8.x86_64 supermin 5.2.0-1.module+el8.3.0+7648+42900458.x86_64 swtpm 0.4.0-3.20200828git0c238a2.module+el8.3.0+8254+568ca30d.x86_64 swtpm-libs 0.4.0-3.20200828git0c238a2.module+el8.3.0+8254+568ca30d.x86_64 swtpm-tools 0.4.0-3.20200828git0c238a2.module+el8.3.0+8254+568ca30d.x86_64 syslinux 6.04-4.el8.x86_64 syslinux-extlinux 6.04-4.el8.x86_64 syslinux-extlinux-nonlinux 6.04-4.el8.noarch syslinux-nonlinux 6.04-4.el8.noarch sysstat 11.7.3-5.el8.x86_64 systemd 239-41.el8_3.x86_64 systemd-container 239-41.el8_3.x86_64 systemd-libs 239-41.el8_3.x86_64 systemd-pam 239-41.el8_3.x86_64 systemd-udev 239-41.el8_3.x86_64 tar 1.30-5.el8.x86_64 tcpdump 4.9.3-1.el8.x86_64 teamd 1.31-2.el8.x86_64 tmux 2.7-1.el8.x86_64 tpm2-tools 4.1.1-1.el8.x86_64 tpm2-tss 2.3.2-2.el8.x86_64 tree 1.7.0-15.el8.x86_64 trousers 0.3.14-4.el8.x86_64 trousers-lib 0.3.14-4.el8.x86_64 tuned 2.14.0-3.el8.noarch tzdata 2020d-1.el8.noarch udisks2 2.9.0-3.el8.x86_64 unbound-libs 1.7.3-14.el8.x86_64 unzip 6.0-43.el8.x86_64 usbredir 0.8.0-1.el8.x86_64 usermode 1.113-1.el8.x86_64 userspace-rcu 0.10.1-2.el8.x86_64 util-linux 2.32.1-24.el8.x86_64 vdo 6.2.3.114-14.el8.x86_64 vdsm 4.40.35.1-1.el8ev.x86_64 vdsm-api 4.40.35.1-1.el8ev.noarch vdsm-client 4.40.35.1-1.el8ev.noarch vdsm-common 4.40.35.1-1.el8ev.noarch vdsm-gluster 4.40.35.1-1.el8ev.x86_64 vdsm-hook-ethtool-options 4.40.35.1-1.el8ev.noarch vdsm-hook-fcoe 4.40.35.1-1.el8ev.noarch vdsm-hook-openstacknet 4.40.35.1-1.el8ev.noarch vdsm-hook-vhostmd 4.40.35.1-1.el8ev.noarch vdsm-hook-vmfex-dev 4.40.35.1-1.el8ev.noarch vdsm-http 4.40.35.1-1.el8ev.noarch vdsm-jsonrpc 4.40.35.1-1.el8ev.noarch vdsm-network 4.40.35.1-1.el8ev.x86_64 vdsm-python 4.40.35.1-1.el8ev.noarch vdsm-yajsonrpc 4.40.35.1-1.el8ev.noarch vhostmd 1.1-4.el8.x86_64 vim-minimal 8.0.1763-15.el8.x86_64 virt-install 2.2.1-3.el8.noarch virt-manager-common 2.2.1-3.el8.noarch virt-v2v 1.42.0-6.module+el8.3.0+7898+13f907d5.x86_64 virt-what 1.18-6.el8.x86_64 virt-who 0.29.3-1.el8.noarch volume_key-libs 0.3.11-5.el8.x86_64 which 2.21-12.el8.x86_64 xfsprogs 5.0.0-4.el8.x86_64 xkeyboard-config 2.28-1.el8.noarch xml-common 0.6.3-50.el8.noarch xmlrpc-c 1.51.0-5.el8.x86_64 xmlrpc-c-client 1.51.0-5.el8.x86_64 xz 5.2.4-3.el8.x86_64 xz-libs 5.2.4-3.el8.x86_64 yajl 2.1.0-10.el8.x86_64 yum 4.2.23-4.el8.noarch zlib 1.2.11-16.el8_2.x86_64
null
https://docs.redhat.com/en/documentation/red_hat_virtualization/4.4/html/package_manifest/ovirt-4.4.3
Chapter 1. 2023.Q4 release notes
Chapter 1. 2023.Q4 release notes Release scope This release includes the following new clients: Red Hat build of Apache Qpid Proton DotNet Red Hat build of Apache Qpid ProtonJ2 View all the clients from the Product Documentation for Red Hat AMQ Clients page. This release does not include a C++ or Python client. If you require these clients, see: Using the AMQ C++ Client 2.11 Using the AMQ Python Client 2.11 Documentation structure Starting in 2023.Q4, documentation for each Red Hat AMQ client is versioned and published independently. For example, if you navigate to Red Hat build of Rhea page, all versions of that client are available to browse. Removed AMQ Clients The following clients are no longer available: AMQ Ruby Client Use the 2.11 Client as a workaround. AMQ OpenWire JMS client Migrate to AMQ JMS or AMQ Core Protocol JMS. The CMS and NMS APIs Users of the CMS API can migrate to AMQ C++, and users of the NMS API can migrate to AMQ .NET. The CMS and NMS APIs might have reduced functionality in AMQ 7. Fixed Issues For a complete list of issues that have been fixed in this release, see the following link for each client. If the client is not listed, then no issues were fixed for that particular client. Alternatively, see AMQ Clients Resolved Issues Component URL Red Hat build of Rhea Red Hat build of Rhea 2023.Q4 Fixed Issues Red Hat build of Apache Qpid JMS Red Hat build of Apache Qpid JMS 2023.Q4 Fixed Issues AMQ Spring Boot Starter AMQ Spring Boot Starter 2023.Q4 Fixed Issues
null
https://docs.redhat.com/en/documentation/red_hat_amq_clients/2023.q4/html/amq_clients_overview/twothousandandtwenty-three_q4_release_notes
Chapter 26. Next steps
Chapter 26. Next steps Integrating Red Hat Decision Manager with Red Hat Single Sign-On Getting started with decision services
null
https://docs.redhat.com/en/documentation/red_hat_decision_manager/7.13/html/installing_and_configuring_red_hat_decision_manager/next_steps
14.7.6. Suspending the Host Physical Machine
14.7.6. Suspending the Host Physical Machine The nodesuspend command puts the host physical machine into a system-wide sleep state similar to Suspend-to-RAM (s3), Suspend-to-Disk (s4), or Hybrid-Suspend, and sets up a Real-Time-Clock to wake the node after the set duration has passed. The --target option can be set to mem , disk , or hybrid . These options suspend the host's memory, disk, or a combination of the two, respectively. The --duration option, specified in seconds, instructs the host physical machine to wake up after that time has elapsed. It is recommended that the duration be longer than 60 seconds.
[ "virsh nodesuspend disk 60" ]
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/virtualization_administration_guide/sub-sect-numa_node_management-suspending_the_host_physical_machine
Chapter 20. Changing a subscription service
Chapter 20. Changing a subscription service To manage subscriptions, you can register a RHEL system with either Red Hat Subscription Management Server or Red Hat Satellite Server. If required, you can change the subscription service at a later point. To change the subscription service under which you are registered, unregister the system from the current service and then register it with a new service. To receive system updates, register your system with either of the management servers. This section contains information about how to unregister your RHEL system from the Red Hat Subscription Management Server and Red Hat Satellite Server. Prerequisites You have registered your system with one of the following: Red Hat Subscription Management Server Red Hat Satellite Server version 6.11 20.1. Unregistering from Subscription Management Server This section contains information about how to unregister a RHEL system from Red Hat Subscription Management Server, using the command line and the Subscription Manager user interface. 20.1.1. Unregistering using the command line Use the unregister command to unregister a RHEL system from Red Hat Subscription Management Server. Procedure Run the unregister command as the root user, without any additional parameters. When prompted, provide the root password. The system is unregistered from the Subscription Management Server, and the status 'The system is currently not registered' is displayed. To continue receiving services without interruption, re-register the system with one of the management services. If you do not register the system with a management service, you might not receive system updates. For more information about registering a system, see Registering your system using the command line . Additional resources Using and Configuring Red Hat Subscription Manager 20.1.2. Unregistering using the Subscription Manager user interface You can unregister a RHEL system from Red Hat Subscription Management Server by using the Subscription Manager user interface. Procedure Log in to your system. From the top left-hand side of the window, click Activities . From the menu options, click the Show Applications icon. Click the Red Hat Subscription Manager icon, or enter Red Hat Subscription Manager in the search. Enter your administrator password in the Authentication Required dialog box. The Subscriptions window appears and displays the current status of Subscriptions, System Purpose, and installed products. Unregistered products display a red X. Authentication is required to perform privileged tasks on the system. Click the Unregister button. The system is unregistered from the Subscription Management Server, and the status 'The system is currently not registered' is displayed with the Register button enabled. To continue receiving services without interruption, re-register the system with one of the management services. If you do not register the system with a management service, you might not receive system updates. For more information about registering a system, see Registering your system using the Subscription Manager User Interface . Additional resources Using and Configuring Red Hat Subscription Manager 20.2. Unregistering from Satellite Server To unregister a Red Hat Enterprise Linux system from Satellite Server, remove the system from Satellite Server. For more information, see Removing a Host from Red Hat Satellite .
[ "subscription-manager unregister" ]
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/9/html/automatically_installing_rhel/changing-a-subscripton-service_rhel-installer
Chapter 86. user
Chapter 86. user This chapter describes the commands under the user command. 86.1. user create Create new user Usage: Table 86.1. Positional Arguments Value Summary <name> New user name Table 86.2. Optional Arguments Value Summary -h, --help Show this help message and exit --domain <domain> Default domain (name or id) --project <project> Default project (name or id) --project-domain <project-domain> Domain the project belongs to (name or id). this can be used in case collisions between project names exist. --password <password> Set user password --password-prompt Prompt interactively for password --email <email-address> Set user email address --description <description> User description --enable Enable user (default) --disable Disable user --or-show Return existing user Table 86.3. Output Formatters Value Summary -f {json,shell,table,value,yaml}, --format {json,shell,table,value,yaml} The output format, defaults to table -c COLUMN, --column COLUMN Specify the column(s) to include, can be repeated Table 86.4. JSON Formatter Value Summary --noindent Whether to disable indenting the json Table 86.5. Shell Formatter Value Summary --prefix PREFIX Add a prefix to all variable names Table 86.6. Table Formatter Value Summary --max-width <integer> Maximum display width, <1 to disable. you can also use the CLIFF_MAX_TERM_WIDTH environment variable, but the parameter takes precedence. --fit-width Fit the table to the display width. implied if --max- width greater than 0. Set the environment variable CLIFF_FIT_WIDTH=1 to always enable --print-empty Print empty table if there is no data to show. 86.2. user delete Delete user(s) Usage: Table 86.7. Positional Arguments Value Summary <user> User(s) to delete (name or id) Table 86.8. Optional Arguments Value Summary -h, --help Show this help message and exit --domain <domain> Domain owning <user> (name or id) 86.3. user list List users Usage: Table 86.9. Optional Arguments Value Summary -h, --help Show this help message and exit --domain <domain> Filter users by <domain> (name or id) --group <group> Filter users by <group> membership (name or id) --project <project> Filter users by <project> (name or id) --long List additional fields in output Table 86.10. Output Formatters Value Summary -f {csv,json,table,value,yaml}, --format {csv,json,table,value,yaml} The output format, defaults to table -c COLUMN, --column COLUMN Specify the column(s) to include, can be repeated --sort-column SORT_COLUMN Specify the column(s) to sort the data (columns specified first have a priority, non-existing columns are ignored), can be repeated Table 86.11. CSV Formatter Value Summary --quote {all,minimal,none,nonnumeric} When to include quotes, defaults to nonnumeric Table 86.12. JSON Formatter Value Summary --noindent Whether to disable indenting the json Table 86.13. Table Formatter Value Summary --max-width <integer> Maximum display width, <1 to disable. you can also use the CLIFF_MAX_TERM_WIDTH environment variable, but the parameter takes precedence. --fit-width Fit the table to the display width. implied if --max- width greater than 0. Set the environment variable CLIFF_FIT_WIDTH=1 to always enable --print-empty Print empty table if there is no data to show. 86.4. user password set Change current user password Usage: Table 86.14. Optional Arguments Value Summary -h, --help Show this help message and exit --password <new-password> New user password --original-password <original-password> Original user password 86.5. user set Set user properties Usage: Table 86.15. 
Positional Arguments Value Summary <user> User to modify (name or id) Table 86.16. Optional Arguments Value Summary -h, --help Show this help message and exit --name <name> Set user name --domain <domain> Domain the user belongs to (name or id). this can be used in case collisions between user names exist. --project <project> Set default project (name or id) --project-domain <project-domain> Domain the project belongs to (name or id). this can be used in case collisions between project names exist. --password <password> Set user password --password-prompt Prompt interactively for password --email <email-address> Set user email address --description <description> Set user description --enable Enable user (default) --disable Disable user 86.6. user show Display user details Usage: Table 86.17. Positional Arguments Value Summary <user> User to display (name or id) Table 86.18. Optional Arguments Value Summary -h, --help Show this help message and exit --domain <domain> Domain owning <user> (name or id) Table 86.19. Output Formatters Value Summary -f {json,shell,table,value,yaml}, --format {json,shell,table,value,yaml} The output format, defaults to table -c COLUMN, --column COLUMN Specify the column(s) to include, can be repeated Table 86.20. JSON Formatter Value Summary --noindent Whether to disable indenting the json Table 86.21. Shell Formatter Value Summary --prefix PREFIX Add a prefix to all variable names Table 86.22. Table Formatter Value Summary --max-width <integer> Maximum display width, <1 to disable. you can also use the CLIFF_MAX_TERM_WIDTH environment variable, but the parameter takes precedence. --fit-width Fit the table to the display width. implied if --max- width greater than 0. Set the environment variable CLIFF_FIT_WIDTH=1 to always enable --print-empty Print empty table if there is no data to show.
[ "openstack user create [-h] [-f {json,shell,table,value,yaml}] [-c COLUMN] [--noindent] [--prefix PREFIX] [--max-width <integer>] [--fit-width] [--print-empty] [--domain <domain>] [--project <project>] [--project-domain <project-domain>] [--password <password>] [--password-prompt] [--email <email-address>] [--description <description>] [--enable | --disable] [--or-show] <name>", "openstack user delete [-h] [--domain <domain>] <user> [<user> ...]", "openstack user list [-h] [-f {csv,json,table,value,yaml}] [-c COLUMN] [--quote {all,minimal,none,nonnumeric}] [--noindent] [--max-width <integer>] [--fit-width] [--print-empty] [--sort-column SORT_COLUMN] [--domain <domain>] [--group <group> | --project <project>] [--long]", "openstack user password set [-h] [--password <new-password>] [--original-password <original-password>]", "openstack user set [-h] [--name <name>] [--domain <domain>] [--project <project>] [--project-domain <project-domain>] [--password <password>] [--password-prompt] [--email <email-address>] [--description <description>] [--enable | --disable] <user>", "openstack user show [-h] [-f {json,shell,table,value,yaml}] [-c COLUMN] [--noindent] [--prefix PREFIX] [--max-width <integer>] [--fit-width] [--print-empty] [--domain <domain>] <user>" ]
https://docs.redhat.com/en/documentation/red_hat_openstack_platform/16.0/html/command_line_interface_reference/user
Chapter 8. Getting Started with Data Transformation
Chapter 8. Getting Started with Data Transformation One of the challenges that come with system and data integration is that the component systems often work with different data formats. You cannot simply send messages from one system to another without translating them into a format (or language) recognized by the receiving system. Data transformation is the term given to this translation. In this chapter, you learn how to include data transformation in a predefined Camel route. The Camel route directs messages from a source endpoint that produces XML data to a target endpoint that consumes JSON data. You add and define a data transformation component that maps the source's XML data format to the target's JSON data format. 8.1. Creating a project for the data transformation example Create a new Fuse Integration Project (select File New Fuse Integration Project ). Provide the following information in the wizard: Project name: starter Deployment platform: Standalone Runtime environment: Karaf/Fuse on Karaf Camel version: Use the default Template: Empty - Blueprint DSL Download the prepared data examples from: https://github.com/FuseByExample/fuse-tooling-tutorials/archive/user-guide-11.1.zip Extract the data folder and the three files that it contains from the user-guide-11.1.zip archive into the Fuse Integration project's src directory ( starter/src/data ). In the Project Explorer view, expand the starter project. Double-click Camel Contexts src/main/resources/OSGI-INF/blueprint/blueprint.xml to open the route in the route editor's Design tab. Click the Source tab to view the underlying XML. Replace <route id="_route1"/> with the following code: Click the Design tab to return to the graphical display of the route: 8.2. Adding a data transformation node to the Camel route In the Palette , expand the Transformation drawer. Click the Data Transformation pattern and then, in the canvas, click the arrow between the SetHeader _setHeader1 and To_to1 nodes. The New Transformation wizard opens with the Dozer File Path field auto-filled. Fill in the remaining fields: In the Transformation ID field, enter xml2json . For Source Type , select XML from the drop-down menu. For Target Type , select JSON from the drop-down menu. Click Next . The Source Type (XML) definition page opens, where you specify either an XML Schema (default) or an example XML Instance Document to provide the type definition of the source data: Leave XML Schema enabled. For Source file , browse to the location of the XML schema file or the XML instance file to use for the type definition of the source data, and select it (in this case, abc-order.xsd ). The XML Structure Preview pane displays a preview of the XML structure. In the Element root field, enter ABCOrder . The tooling uses this text to label the pane that displays the source data items to map. The Source Type (XML) definition page should now look like this: Click Next to open the Target Type (JSON) definition page. This is where you specify the type definition for the target data. Click JSON Instance Document . In the Target File field, enter the path to the xyz-order.json instance document, or browse to it. The JSON Structure Preview pane displays a preview of the JSON data structure: Click Finish . The transformation editor opens. This is where you can map data items in your XML source to data items in your JSON target.
The transformation editor is composed of three panels: Source - lists the available data items of the source Mappings - displays the mappings between the source and target data items Target - lists the available data items of the target In addition, the editor's details pane, located just below the editor's three panels (once the first mapping has been made), graphically displays the hierarchical ancestors for both the mapped source and target data items currently selected. For example: Using the details pane, you can customize the mapping for the selected source and target data items: Set property - Modify an existing mapping or map a simple data item to one in a collection (see Section 8.8, "Mapping a simple data item to a data item in a collection" ). Set variable - Specify a constant value for a data item (see Section 8.5, "Mapping a constant variable to a data item" ). Set expression - Map a data item to the dynamic evaluation of a specified expression (see Section 8.6, "Mapping an expression to a data item" ). Add transformation - Modify the value of a mapped data item using a built-in function (see Section 8.9, "Adding a built-in function to a mapped data item" ). Add custom transformation - Modify the value of a mapped data item using the Java method you create or one you previously created (see Section 8.7, "Adding a custom transformation to a mapped data item" ). 8.3. Mapping source data items to target data items Expand all items in the Source and Target panels located on left and right sides of the Mappings panel. Drag a data item from the Source panel and drop it on its corresponding data item in the Target panel. For example, drag the customerNum data item from the Source panel and drop it on the custId data item in the Target panel. The mapping appears in the Mappings panel, and the details of both the Source and Target data items appear below in the details pane. Continue dragging and dropping source data items onto their corresponding target data items until you have completed all basic mappings. In the starter example, the remaining data items to map are: Source Target orderNum orderId status priority id itemId price cost quantity amount Note You can map collections (data items containing lists or sets) to non-collection data items and vice versa, but you cannot map collections to other collections. Click on both the Source and Target panels to quickly determine whether all data items have been mapped. Only data items that have not been mapped are listed in the Source and Target panels. In the starter example, the remaining unmapped Target attributes are approvalCode and origin . Click the blueprint.xml tab to return to the graphical display of the route: Click File Save . You can run a JUnit test on your transformation file after you create the transformation test. For details, see Section 8.4, "Creating the transformation test file and running the JUnit test" . If you do so at this point, you will see this output in the Console view: For the source XML data: For the target JSON data: 8.4. Creating the transformation test file and running the JUnit test Right-click the starter project in the Project Explorer view, and select New Other Fuse Tooling Fuse Transformation Test . Select to open the New Transformation Test wizard. In the New Transformation Test wizard, set the following values: Field Value Package example Camel File Path OSGI-INF/blueprint/blueprint.xml Transformation ID xml2json Click Finish . 
In the Project Explorer view, navigate to starter/src/test/java/example , and open the TransformationTest.java file. Add the following code to the transform method: Click File Save . You can now run a JUnit test on your transformation file at any point in these tutorials. In the Project Explorer view, expand the starter project to expose the /src/test/java/example/TransformationTest.java file. Right click it to open the context menu, and select Run as JUnit Test . The JUnit Test pane opens to display the status of the test. To avoid cluttering your workspace, drag and drop the pane in the bottom panel near the Console view. Open the Console view to see the log output. 8.5. Mapping a constant variable to a data item When a source/target data item has no corresponding target/source data item, you can map a constant variable to the existing data item. In the starter example, the target data item origin does not have a corresponding source data item. To map the origin attribute to a constant variable: In the Source panel, click the Variables view. In the Variables view, click to open the Enter a new variable name dialog. Enter a name for the variable you want to create. For the starter example, enter ORIGIN . Click OK . The newly created variable ORIGIN appears in the Variables view in the Name column and the default value ORIGIN in the Value column. Click the default value to edit it, and change the value to Web . Press Enter . Drag and drop the new variable ORIGIN onto the origin data item in the Target panel. The new mapping of the variable USD(ORIGIN) appears in the Mappings panel and in the details pane. Run a JUnit test on your TransformationTest.java file. For details, see Section 8.4, "Creating the transformation test file and running the JUnit test" . The Console view displays the JSON-formatted output data: 8.6. Mapping an expression to a data item This feature enables you, for example, to map a target data item to the dynamic evaluation of a Camel language expression. Use the target approvalCode data item, which lacks a corresponding source data item: Click to add an empty transformation map to the Mappings panel. From the Target panel, drag and drop the approvalCode data item to the target field of the newly created mapping in the Mappings panel. The approvalCode data item also appears in the details pane's target box. In the details pane, click on the ABCOrder source box to open the drop-down menu. Menu options depend on the selected data item's data type. The available options are bolded. Select Set expression to open the Expression dialog. In Language , select the expression language to use from the list of those available. Available options depend on the data item's data type. For the starter example, select Header . In the details pane, select the source of the expression to use. The options are Value and Script . For the starter example, click Value , and then enter ApprovalID . Click OK . Both the Mappings panel and the details pane display the new mapping for the target data item approvalCode . Run a JUnit test on your TransformationTest.java file. For details, see Section 8.4, "Creating the transformation test file and running the JUnit test" . The Console view displays the JSON-formatted output data: 8.7. Adding a custom transformation to a mapped data item You may need to modify the formatting of source data items when they do not satisfy the requirements of the target system. 
For example, to satisfy the target system's requirement that all customer IDs be enclosed in brackets: In the Mappings panel, select the customerNum mapping to populate the details pane. In the details pane, click on the ABCOrder source box to open the drop-down menu. Select Add custom transformation to open the Add Custom Transformation page. Click to the Class field to open the Create a New Java Class wizard. Modify the following fields: Package - Enter example . Name - Enter MyCustomMapper . Method Name - Change map to brackets . Leave all other fields as is. Click Finish . The Add Custom Transformation page opens with the Class and Method fields auto filled: Click OK to open the MyCustomMapper.java file in the Java editor: Edit the brackets method to change the last line return null; to this: Click the transformation.xml tab to switch back to the transformation editor. The details pane shows that the brackets method has been associated with the customerNum data item. The brackets method is executed on the source input before it is sent to the target system. Run a JUnit test on your TransformationTest.java file. For details, see Section 8.4, "Creating the transformation test file and running the JUnit test" . The Console view displays the JSON-formatted output data: 8.8. Mapping a simple data item to a data item in a collection In this tutorial, you will modify an existing mapping that maps all id s in the Source to the itemId s in the Target. The new mapping will map the customerNum data item in the Source to the itemId of the second item in the lineItems collection in the Target. With this change, no id s in the Source will be mapped to itemId s in the Target. In the Mappings panel, select the mapping id - > itemId to display the mapping in the details pane. On the Source box, click to open the drop-down menu, and select Set property . In the Select a property page, expand the header node and select customerNum . Click OK to save the changes. The details pane now shows that XyzOrder has a lineItems field. Click the toggle button to lineItems to increase its value to 1 . Note Indexes are zero-based, so a value of 1 selects the second instance of itemId in the collection. Notice that the details pane shows customerNum mapped to the itemId of the second item in the lineItems collection. Run a JUnit test on your TransformationTest.java file. For details, see Section 8.4, "Creating the transformation test file and running the JUnit test" . The Console view displays the JSON-formatted output data: 8.9. Adding a built-in function to a mapped data item You can use the built-in string-related functions to apply transformations to mapped data items. In the Transformations panel, select the status to priority mapping to populate the details pane. In the Source box, click to open the drop-down menu, and select Add transformation . In the Transformations pane, select append , and in the Arguments pane, enter -level for the value of suffix . This append function adds the specified suffix to the end of the status string before mapping it to the target priority data item. Click OK . By default, the details pane displays the results of adding the append function to the status data item in a user-friendly format. You can change this formatting by clicking the right-most on the Source box, and selecting Show standard formatting . Run a JUnit test on your TransformationTest.java file. For details, see Section 8.4, "Creating the transformation test file and running the JUnit test" . 
The Console view displays the JSON-formatted output data: 8.10. Publishing a Fuse Integration project with data transformation to a Red Hat Fuse server Before you publish your data transformation project to a Fuse server (see Chapter 28, Publishing Fuse Integration Projects to a Server ), you need to install the following features in the Fuse runtime: camel-dozer camel-jackson camel-jaxb To install the required features on the Fuse runtime: If not already there, switch to the Fuse Integration perspective. If necessary, add the Fuse server to the Servers list (see Section 27.1, "Adding a Server" ). Start the Fuse Server (see Section 27.2, "Starting a Server" ), and wait for the JBoss Fuse shell to appear in the Terminal view. For each of the required camel- features, at the JBossFuse:admin@root> prompt type: features:install camel-<featureName> Where featureName is one of dozer , jackson , or jaxb . To verify that each of the features was successfully installed, at the JBossFuse:admin@root> prompt type: features:list --ordered --installed You should see the camel features you just installed in the output listing:
[ "<route id=\"_route1\"> <from id=\"_from1\" uri=\"file:src/data?fileName=abc-order.xml&amp;noop=true\"/> <setHeader headerName=\"approvalID\" id=\"_setHeader1\"> <simple>AUTO_OK</simple> </setHeader> <to id=\"_to1\" uri=\"file:target/messages?fileName=xyz-order.json\"/> </route>", "<?xml version=\"1.0\" encoding=\"UTF-8\"?> <ABCOrder xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns:java=\"http://java.sun.com\"> <header> <status>GOLD</status> <customer-num>ACME-123</customer-num> <order-num>ORDER1</order-num> </header> <order-items> <item id=\"PICKLE\"> <price>2.25</price> <quantity>1000</quantity> </item> <item id=\"BANANA\"> <price>1.25</price> <quantity>400</quantity> </item> </order-items> </ABCOrder>", "{\"custId\":\"ACME-123\",\"priority\":\"GOLD\",\"orderId\":\"ORDER1\",\"lineItems\":[{\"itemId\":\"PICKLE\", \"amount\":1000,\"cost\":2.25},{\"itemId\":\"BANANA\",\"amount\":400,\"cost\":1.25", "startEndpoint.sendBodyAndHeader(readFile(\"src/data/abc-order.xml\"), \"approvalID\", \"AUTO_OK\");", "{\"custId\":\"ACME-123\",\"priority\":\"GOLD\",\"orderId\":\"ORDER1\",\" origin\":\"Web \", \"approvalCode\":\"AUTO_OK\",\"lineItems\":[{\"itemId\":\"PICKLE\",\"amount\":1000,\"cost\":2.25}, {\"itemId\":\"BANANA\",\"amount\":400,\"cost\":1.25}]}", "{\"custId\":\"ACME-123\",\"priority\":\"GOLD\",\"orderId\":\"ORDER1\",\"origin\":\"Web\", \" approvalCode\":\"AUTO_OK \",\"lineItems\":[{\"itemId\":\"PICKLE\",\"amount\":1000,\"cost\":2.25}, {\"itemId\":\"BANANA\",\"amount\":400,\"cost\":1.25}]}", "return \"[\" + input + \"]\";", "{\"custId\":\"[ACME-123]\",\"priority\":\"GOLD\",\"orderId\":\"ORDER1\",\"origin\":\"Web\", \"approvalCode\":\"AUTO_OK\",\"lineItems\":[{\"itemId\":\"PICKLE\",\"amount\":1000,\"cost\":2.25}, {\"itemId\":\"BANANA\",\"amount\":400,\"cost\":1.25}]}", "{\"custId\":\"[ACME-123]\",\"priority\":\"GOLD\",\"orderId\":\"ORDER1\",\"origin\":\"Web\", \"approvalCode\":\"AUTO_OK\",\"lineItems\":[{\"amount\":1000,\"cost\":2.25}, {\" itemId\":\"ACME-123 \",\"amount\":400,\"cost\":1.25}]}", "{\"custId\":\"[ACME-123]\",\"priority\":\" GOLD-level \",\"orderId\":\"ORDER1\",\"origin\":\"Web\", \"approvalCode\":\"AUTO_OK\",\"lineItems\":[{\"amount\":1000,\"cost\":2.25},{\"itemId\":\"ACME-123\", \"amount\":400,\"cost\":1.25}]}" ]
https://docs.redhat.com/en/documentation/red_hat_fuse/7.13/html/tooling_user_guide/transtools
Chapter 30. Preventing resource overuse by using mutex
Chapter 30. Preventing resource overuse by using mutex Mutual exclusion (mutex) algorithms are used to prevent overuse of common resources. 30.1. Mutex options Mutual exclusion (mutex) algorithms are used to prevent processes simultaneously using a common resource. A fast user-space mutex (futex) is a tool that allows a user-space thread to claim a mutex without requiring a context switch to kernel space, provided the mutex is not already held by another thread. When you initialize a pthread_mutex_t object with the standard attributes, a private, non-recursive, non-robust, and non-priority inheritance-capable mutex is created. This object does not provide any of the benefits offered by the pthreads API and the RHEL for Real Time kernel. To benefit from the pthreads API and the RHEL for Real Time kernel, create a pthread_mutexattr_t object. This object stores the attributes defined for the futex. Note The terms futex and mutex are used to describe POSIX thread ( pthread ) mutex constructs. 30.2. Creating a mutex attribute object To define any additional capabilities for the mutex , create a pthread_mutexattr_t object. This object stores the defined attributes for the futex. This is a basic safety procedure that you must always perform. Procedure Declare the mutex and the mutex attribute object, then initialize the attribute object: pthread_mutex_t my_mutex ; pthread_mutexattr_t my_mutex_attr ; pthread_mutexattr_init( &my_mutex_attr ) ; For more information about advanced mutex attributes, see Advanced mutex attributes . 30.3. Creating a mutex with standard attributes When you initialize a pthread_mutex_t object with the standard attributes, a private, non-recursive, non-robust, and non-priority inheritance-capable mutex is created. Procedure Create a mutex object under pthreads using one of the following: pthread_mutex_t my_mutex ; pthread_mutex_init( &my_mutex , &my_mutex_attr ); where &my_mutex_attr is a pointer to a mutex attribute object. Passing NULL instead of an attribute object initializes the mutex with the standard attributes. 30.4. Advanced mutex attributes The following advanced mutex attributes can be stored in a mutex attribute object: Mutex attributes Shared and private mutexes Shared mutexes can be used between processes; however, they can create a lot more overhead. pthread_mutexattr_setpshared(&my_mutex_attr, PTHREAD_PROCESS_SHARED); Real-time priority inheritance You can avoid priority inversion problems by using priority inheritance. pthread_mutexattr_setprotocol(&my_mutex_attr, PTHREAD_PRIO_INHERIT); Robust mutexes When a pthread dies, robust mutexes held by the pthread are released. However, this comes with a high overhead cost. _NP in this string indicates that this option is non-POSIX or not portable. pthread_mutexattr_setrobust_np(&my_mutex_attr, PTHREAD_MUTEX_ROBUST_NP); Mutex initialization After the attributes are set, initialize the mutex with the attribute object. pthread_mutex_init(&my_mutex, &my_mutex_attr); 30.5. Cleaning up a mutex attribute object After the mutex has been created using the mutex attribute object, you can keep the attribute object to initialize more mutexes of the same type, or you can clean it up. The mutex is not affected in either case. Procedure Clean up the attribute object using the pthread_mutexattr_destroy() function: The mutex now operates as a regular pthread_mutex and can be locked, unlocked, and destroyed as normal. 30.6. Additional resources futex(7) , pthread_mutex_destroy(P) , pthread_mutexattr_setprotocol(3p) , and pthread_mutexattr_setprioceiling(3p) man pages on your system
[ "pthread_mutexattr_destroy( &my_mutex_attr );" ]
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux_for_real_time/9/html/optimizing_rhel_9_for_real_time_for_low_latency_operation/assembly_preventing-resource-overuse-by-using-mutex_optimizing-rhel9-for-real-time-for-low-latency-operation
Chapter 4. Customer Portal Labs Relevant For Migration
Chapter 4. Customer Portal Labs Relevant For Migration Red Hat Customer Portal Labs are tools designed to help you improve performance, troubleshoot issues, identify security problems, and optimize configuration. This appendix provides an overview of Red Hat Customer Portal Labs relevant to migration. All Red Hat Customer Portal Labs are available at http://access.redhat.com/labs/ . Red Hat Enterprise Linux Upgrade Helper The Red Hat Enterprise Linux Upgrade Helper is a tool that helps you upgrade your Red Hat Enterprise Linux from version 6.5/6.6/6.7/6.8/6.9 to version 7.x. The only information that you need to provide is your upgrade path. This application shows you: the basic steps to upgrade Red Hat Enterprise Linux extra steps that prevent known issues specific to your upgrade scenario This application supports the following upgrade paths: 6.5 to 7.4 6.6 to 7.4 6.7 to 7.4 6.8 to 7.4 6.9 to 7.4 Product Life Cycle Checker The Product Life Cycle Checker is a tool for viewing Red Hat products' life-cycle information, including General Availability, End of Support, and End of Life. With this tool, you can select multiple products and view their life-cycle dates.
null
https://docs.redhat.com/en/documentation/Red_Hat_Enterprise_Linux/7/html/migration_planning_guide/appe-red_hat_enterprise_linux-migration_planning_guide-customer_portal_labs
Chapter 9. Setting up graphical representation of PCP metrics
Chapter 9. Setting up graphical representation of PCP metrics Using a combination of pcp , grafana , pcp redis , pcp bpftrace , and pcp vector provides graphical representation of the live data or data collected by Performance Co-Pilot (PCP). 9.1. Setting up PCP with pcp-zeroconf This procedure describes how to set up PCP on a system with the pcp-zeroconf package. Once the pcp-zeroconf package is installed, the system records the default set of metrics into archived files. Procedure Install the pcp-zeroconf package: Verification Ensure that the pmlogger service is active, and starts archiving the metrics: Additional resources pmlogger man page on your system Monitoring performance with Performance Co-Pilot 9.2. Setting up a Grafana server Grafana generates graphs that are accessible from a browser. The Grafana server is a back-end server for the Grafana dashboard. It listens, by default, on all interfaces, and provides web services accessed through the web browser. The grafana-pcp plugin interacts with the pmproxy daemon in the backend. This procedure describes how to set up a Grafana server. Prerequisites PCP is configured. For more information, see Setting up PCP with pcp-zeroconf . Procedure Install the following packages: Restart and enable the following service: Open the server's firewall for network traffic to the Grafana service. Verification Ensure that the Grafana server is listening and responding to requests: Ensure that the grafana-pcp plugin is installed: Additional resources pmproxy(1) and grafana-server(1) man pages on your system 9.3. Accessing the Grafana web UI This procedure describes how to access the Grafana web interface. Using the Grafana web interface, you can: Add PCP Redis, PCP bpftrace, and PCP Vector data sources Create dashboard View an overview of any useful metrics Create alerts in PCP Redis. Prerequisites PCP is configured. For more information, see Setting up PCP with pcp-zeroconf . The Grafana server is configured. For more information, see Setting up a Grafana server . Procedure On the client system, open http:// <grafana_server_IP_address_or_hostname> :3000 in your browser. For the first login, enter admin in both the Email or username and Password field. Grafana prompts to set a New password to create a secured account. In the menu, navigate to Administration and then click Plugins . In the Plugins tab, type performance co-pilot in the Search Grafana plugins text box and then click Performance Co-Pilot (PCP) plugin. In the Plugins / Performance Co-Pilot pane, click Enable . Click the Grafana icon . The Grafana Home page is displayed. Figure 9.1. Home Dashboard Note The top right corner of the screen has a settings (gear) icon that controls the general Dashboard settings . In the Grafana Home page, click Add your first data source to add PCP Redis, PCP bpftrace, and PCP Vector data sources. For more information about adding data source, see: To add pcp redis data source, view default dashboard, create a panel, and an alert rule, see Creating panels and alert in PCP Redis data source . To add pcp bpftrace data source and view the default dashboard, see Viewing the PCP bpftrace System Analysis dashboard . To add pcp vector data source, view the default dashboard, and to view the vector checklist, see Viewing the PCP Vector Checklist . Optional: From the menu, hover over the admin profile icon to update your Profile , view Notification history , Change password , or to Sign out . 
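The following is a minimal sketch of the installation and verification commands for the steps described in sections 9.1 and 9.2, not an exact reproduction of the original listings; the package names, service names, and port 3000 are assumptions based on the text above:
# 9.1: install PCP with default archiving and confirm pmlogger is recording
dnf install pcp-zeroconf
systemctl status pmlogger
# 9.2: install the Grafana server and the PCP plugin, then start the service
dnf install grafana grafana-pcp
systemctl restart grafana-server
systemctl enable grafana-server
# open the default Grafana port and verify the server is listening and the plugin is present
firewall-cmd --permanent --add-port=3000/tcp
firewall-cmd --reload
ss -ltn | grep 3000
grafana-cli plugins ls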
Additional resources grafana-cli(1) and grafana-server(1) man pages on your system 9.4. Configuring secure connections for Grafana You can establish secure connections between Grafana and Performance Co-Pilot (PCP) components. Establishing secure connections between these components helps prevent unauthorized parties from accessing or modifying the data being collected and monitored. Prerequisites PCP is installed. For more information, see Installing and enabling PCP . The Grafana server is configured. For more information, see Setting up a Grafana server . The private client key is stored in the /etc/grafana/grafana.key file. If you use a different path, modify the path in the corresponding steps of the procedure. For details about creating a private key and certificate signing request (CSR), as well as how to request a certificate from a certificate authority (CA), see your CA's documentation. The TLS client certificate is stored in the /etc/grafana/grafana.crt file. If you use a different path, modify the path in the corresponding steps of the procedure. Procedure As a root user, open the /etc/grafana/grafana.ini file and adjust the following options in the [server] section to reflect the following: Ensure grafana can access the certificates: Restart and enable the Grafana service to apply the configuration changes: Verification On the client system, open a browser and access the Grafana server machine on port 3000, using the https://192.0.2.0:3000 link. Replace 192.0.2.0 with your machine IP. Confirm the lock icon is displayed beside the address bar. Note If the protocol is set to http and an HTTPS connection is attempted, you will receive a ERR_SSL_PROTOCOL_ERROR error. If the protocol is set to https and an HTTP connection is attempted, the Grafana server responds with a "Client sent an HTTP request to an HTTPS server" message. 9.5. Configuring PCP Redis Use the PCP Redis data source to: View data archives Query time series using pmseries language Analyze data across multiple hosts Prerequisites PCP is configured. For more information, see Setting up PCP with pcp-zeroconf . The Grafana server is configured. For more information, see Setting up a Grafana server . Mail transfer agent, for example, sendmail or postfix is installed and configured. Procedure Install the redis package: Start and enable the following services: Restart the Grafana server: Verification Ensure that the pmproxy and redis are working: This command does not return any data if the redis package is not installed. Additional resources pmseries(1) man page on your system 9.6. Creating panels and alerts in PCP Redis data source After adding the PCP Redis data source, you can view the dashboard with an overview of useful metrics, add a query to visualize the load graph, and create alerts that help you to view the system issues after they occur. Prerequisites The PCP Redis is configured. For more information, see Configuring PCP Redis . The Grafana server is accessible. For more information, see Accessing the Grafana web UI . Procedure Log into the Grafana web UI. In the Grafana Home page, click Add your first data source . In the Add data source pane, type redis in the Filter by name or type text box and then click PCP Redis . In the Data Sources / PCP Redis pane, perform the following: Add http://localhost:44322 in the URL field and then click Save & Test . Click Dashboards tab Import PCP Redis: Host Overview to see a dashboard with an overview of any useful metrics. 
Optional: In the drop-down menu next to the clock icon , you can set the timeline of the displayed metrics either by setting the absolute time range or by selecting a predefined range. You can also use the zoom out icon to modify the displayed time range. Note The time frame displayed by default might not be aligned with the time frame covered by the archive files created by the pmlogger service. Figure 9.2. PCP Redis: Host Overview Add a new panel: From the plus sign drop-down menu, select New dashboard . From the Add drop-down menu, select Visualization . In the Query tab, select the pcp-redis-datasource as the Data source . In the text field below A , enter a metric, for example, kernel.all.load to visualize the kernel load graph. Optional: From the Time series drop-down menu on the right, select another format of visualization, for example, Bar chart , Table , or Heatmap . Optional: Add Panel title and Description , and update other options. Click Save to apply changes and save the dashboard. Add Title . Click Apply to apply changes and go back to the dashboard. Figure 9.3. PCP Redis query panel Create an alert rule: In the PCP Redis query panel , click Alert and then click New alert rule . Enter an alert rule name. Define the query and alert condition. Set the evaluation behavior. Optional: Add annotations. Add labels and notifications. Click Save rule and exit to apply changes in alert rules. Click Apply to apply changes and go back to the dashboard. Figure 9.4. Creating alerts in the PCP Redis panel To add a notification channel for the created alert rule to receive an alert notification from Grafana, see Adding notification channels for alerts . 9.7. Adding notification channels for alerts By adding notification channels, you can receive an alert notification from Grafana whenever the alert rule conditions are met and the system needs further monitoring. You can receive these alerts after selecting any one type from the supported list of notifiers, which includes: Alertmanager Cisco Webex Teams DingDing Discord Email Google Chat Kafka REST Proxy LINE Microsoft Teams OpsGenie PagerDuty Pushover Sensu Go Slack Telegram Threema Gateway VictorOps WeCom Webhook Prerequisites The Grafana server is accessible. For more information, see Accessing the Grafana web UI . An alert rule is created. For more information, see Creating panels and alert in PCP Redis data source . Procedure Configure SMTP and add a valid sender's email address in the /etc/grafana/grafana.ini file: Restart the Grafana server. From the menu, select Alerting Contact points + Add contact point . Figure 9.5. Alerting in Grafana In the Create contact point details view, perform the following: Enter your name in the Name text box. Select the Integration type, for example, Email , and enter the email address or multiple email addresses. Optional: Configure Optional Email settings and Notification settings . Click Save contact point . Select a notification channel in the alert rule: From the Alerting menu, select Notification policies . Click the three dots icon on the far right of Default policy and select Edit . Choose the Contact point you have just created and click Update default policy . Optional: Configure a nested policy in addition to the default policy. Optional: Configure Mute Timings . Additional resources Upstream Grafana documentation for alert notification policies 9.8.
Setting up authentication between PCP components You can set up authentication using the scram-sha-256 authentication mechanism, which is supported by PCP through the Simple Authentication Security Layer (SASL) framework. Procedure Install the sasl framework for the scram-sha-256 authentication mechanism: Specify the supported authentication mechanism and the user database path in the pmcd.conf file: Create a new user: Replace metrics with your user name. Add the created user to the user database: To add the created user, you are required to enter the metrics account password. Set the permissions of the user database: Restart the pmcd service: Verification Verify the sasl configuration: Additional resources saslauthd(8) , pminfo(1) , and sha256 man pages on your system How can I setup authentication between PCP components, like PMDAs and pmcd in RHEL 8.2? (Red Hat Knowledgebase) 9.9. Installing PCP bpftrace Install the PCP bpftrace agent to introspect a system and to gather metrics from the kernel and user-space tracepoints. The bpftrace agent uses bpftrace scripts to gather the metrics. The bpftrace scripts use the enhanced Berkeley Packet Filter ( eBPF ). This procedure describes how to install pcp bpftrace . Prerequisites PCP is configured. For more information, see Setting up PCP with pcp-zeroconf . The Grafana server is configured. For more information, see Setting up a Grafana server . The scram-sha-256 authentication mechanism is configured. For more information, see Setting up authentication between PCP components . Procedure Install the pcp-pmda-bpftrace package: Edit the bpftrace.conf file and add the user that you have created in Setting up authentication between PCP components : Replace metrics with your user name. Install the bpftrace PMDA: The pmda-bpftrace is now installed, and can only be used after authenticating your user. For more information, see Viewing the PCP bpftrace System Analysis dashboard . Additional resources pmdabpftrace(1) and bpftrace man pages on your system 9.10. Viewing the PCP bpftrace System Analysis dashboard Using the PCP bpftrace data source, you can access live data from sources which are not available as normal data from the pmlogger or archives. In the PCP bpftrace data source, you can view the dashboard with an overview of useful metrics. Prerequisites The PCP bpftrace is installed. For more information, see Installing PCP bpftrace . The Grafana server is accessible. For more information, see Accessing the Grafana web UI . Procedure Log into the Grafana web UI. In the menu, navigate to Connections Data sources + Add new data source . In the Add data source pane, type bpftrace in the Filter by name or type text box and then click PCP bpftrace . In the Data Sources / pcp-bpftrace-datasource pane, perform the following: Add http://localhost:44322 in the URL field. Toggle the Basic Auth option and add the created user credentials in the User and Password fields. Click Save & Test . Figure 9.6. Adding PCP bpftrace in the data source Click Dashboards tab Import PCP bpftrace: System Analysis to see a dashboard with an overview of useful metrics. Figure 9.7. PCP bpftrace: System Analysis 9.11. Installing PCP Vector Install a pcp vector data source to show live, on-host metrics from the real-time pmwebapi interfaces. This data source is intended for on-demand performance monitoring of an individual host and includes container support. Prerequisites PCP is configured. For more information, see Setting up PCP with pcp-zeroconf .
The Grafana server is configured. For more information, see Setting up a Grafana server . Procedure Install the pcp-pmda-bcc package: Install the bcc PMDA: Additional resources pmdabcc(1) man page on your system 9.12. Viewing the PCP Vector Checklist The PCP Vector data source displays live metrics and uses the pcp metrics. It analyzes data for individual hosts. After adding the PCP Vector data source, you can view the dashboard with an overview of useful metrics and view the related troubleshooting or reference links in the checklist. Prerequisites The PCP Vector is installed. For more information, see Installing PCP Vector . The Grafana server is accessible. For more information, see Accessing the Grafana web UI . Procedure Log into the Grafana web UI. In the menu, navigate to Connections Data sources + Add new data source . In the Add data source pane, type vector in the Filter by name or type text box and then click PCP Vector . In the Data Sources / pcp-vector-datasource pane, perform the following: Add http://localhost:44322 in the URL field and then click Save & Test . Click Dashboards tab Import PCP Vector: Host Overview to see a dashboard with an overview of any useful metrics. Figure 9.8. PCP Vector: Host Overview In the menu, navigate to Apps Performance Co-Pilot PCP Vector Checklist . In the PCP checklist, click the question mark icon for help or the warning icon to view the related troubleshooting or reference links. Figure 9.9. Performance Co-Pilot / PCP Vector Checklist 9.13. Using heatmaps in Grafana You can use heatmaps in Grafana to view histograms of your data over time, identify trends and patterns in your data, and see how they change over time. Each column within a heatmap represents a single histogram with different colored cells representing the different densities of observation of a given value within that histogram. Important This specific workflow is for the heatmaps in Grafana version 10 and later on RHEL9. Prerequisites PCP Redis is configured. For more information see Configuring PCP Redis . The Grafana server is accessible. For more information see Accessing the Grafana Web UI . The PCP Redis data source is configured. For more information see Creating panels and alerts in PCP Redis data source . Procedure In the menu, select Dashboards New Dashboard + Add visualization . In the Select data source pane, select pcp-redis-datasource . In the Query tab, in the text field below A , enter a metric, for example, kernel.all.load to visualize the kernel load graph. From the Time series drop-down menu on the right, select Heatmap . Optional: In the Panel Options dropdown menu, add a Panel Title and Description . In the Heatmap dropdown menu, under the Calculate from data setting, click Yes . Heatmap Optional: In the Colors dropdown menu, change the Scheme from the default Orange and select the number of steps (color shades). Optional: In the Tooltip dropdown menu, click the toggle to display a cell's position within its specific histogram when hovering your cursor over a cell in the heatmap. For example: Show histogram (Y Axis) cell display 9.14. Troubleshooting Grafana issues It is sometimes neccesary to troubleshoot Grafana issues, such as, Grafana does not display any data, the dashboard is black, or similar issues. 
Procedure Verify that the pmlogger service is up and running by executing the following command: Verify whether archive files were created or modified on disk by executing the following command: Verify that the pmproxy service is running by executing the following command: Verify that pmproxy is running, that time series support is enabled, and that a connection to Redis is established by viewing the /var/log/pcp/pmproxy/pmproxy.log file and ensuring that it contains the following text: Here, 1716 is the PID of pmproxy, which will be different for every invocation of pmproxy. Verify whether the Redis database contains any keys by executing the following command: Verify whether any PCP metrics are in the Redis database and whether pmproxy is able to access them by executing the following commands: Verify whether there are any errors in the Grafana logs by executing the following command: A combined sketch of these checks follows the command listing below.
[ "dnf install pcp-zeroconf", "pcp | grep pmlogger pmlogger: primary logger: /var/log/pcp/pmlogger/ localhost.localdomain/20200401.00.12", "dnf install grafana grafana-pcp", "systemctl restart grafana-server systemctl enable grafana-server", "firewall-cmd --permanent --add-service=grafana success firewall-cmd --reload success", "ss -ntlp | grep 3000 LISTEN 0 128 *:3000 *:* users:((\"grafana-server\",pid=19522,fd=7))", "grafana-cli plugins ls | grep performancecopilot-pcp-app performancecopilot-pcp-app @ 5.1.1", "protocol = https cert_key = /etc/grafana/grafana.key cert_file = /etc/grafana/grafana.crt", "su grafana -s /bin/bash -c 'ls -1 /etc/grafana/grafana.crt /etc/grafana/grafana.key' /etc/grafana/grafana.crt /etc/grafana/grafana.key", "systemctl restart grafana-server systemctl enable grafana-server", "dnf install redis", "systemctl start pmproxy redis systemctl enable pmproxy redis", "systemctl restart grafana-server", "pmseries disk.dev.read 2eb3e58d8f1e231361fb15cf1aa26fe534b4d9df", "[smtp] enabled = true from_address = <sender_email_address>", "systemctl restart grafana-server.service", "dnf install cyrus-sasl-scram cyrus-sasl-lib", "vi /etc/sasl2/pmcd.conf mech_list: scram-sha-256 sasldb_path: /etc/pcp/passwd.db", "useradd -r metrics", "saslpasswd2 -a pmcd metrics Password: Again (for verification):", "chown root:pcp /etc/pcp/passwd.db chmod 640 /etc/pcp/passwd.db", "systemctl restart pmcd", "pminfo -f -h \"pcp://127.0.0.1?username= metrics \" disk.dev.read Password: disk.dev.read inst [0 or \"sda\"] value 19540", "dnf install pcp-pmda-bpftrace", "vi /var/lib/pcp/pmdas/bpftrace/bpftrace.conf [dynamic_scripts] enabled = true auth_enabled = true allowed_users = root, metrics", "cd /var/lib/pcp/pmdas/bpftrace/ ./Install Updating the Performance Metrics Name Space (PMNS) Terminate PMDA if already installed Updating the PMCD control file, and notifying PMCD Check bpftrace metrics have appeared ... 7 metrics and 6 values", "dnf install pcp-pmda-bcc", "cd /var/lib/pcp/pmdas/bcc ./Install [Wed Apr 1 00:27:48] pmdabcc(22341) Info: Initializing, currently in 'notready' state. [Wed Apr 1 00:27:48] pmdabcc(22341) Info: Enabled modules: [Wed Apr 1 00:27:48] pmdabcc(22341) Info: ['biolatency', 'sysfork', [...] Updating the Performance Metrics Name Space (PMNS) Terminate PMDA if already installed Updating the PMCD control file, and notifying PMCD Check bcc metrics have appeared ... 1 warnings, 1 metrics and 0 values", "systemctl status pmlogger", "ls /var/log/pcp/pmlogger/USD(hostname)/ -rlt total 4024 -rw-r--r--. 1 pcp pcp 45996 Oct 13 2019 20191013.20.07.meta.xz -rw-r--r--. 1 pcp pcp 412 Oct 13 2019 20191013.20.07.index -rw-r--r--. 1 pcp pcp 32188 Oct 13 2019 20191013.20.07.0.xz -rw-r--r--. 
1 pcp pcp 44756 Oct 13 2019 20191013.20.30-00.meta.xz [..]", "systemctl status pmproxy", "pmproxy(1716) Info: Redis slots, command keys, schema version setup", "redis-cli dbsize (integer) 34837", "pmseries disk.dev.read 2eb3e58d8f1e231361fb15cf1aa26fe534b4d9df pmseries \"disk.dev.read[count:10]\" 2eb3e58d8f1e231361fb15cf1aa26fe534b4d9df [Mon Jul 26 12:21:10.085468000 2021] 117971 70e83e88d4e1857a3a31605c6d1333755f2dd17c [Mon Jul 26 12:21:00.087401000 2021] 117758 70e83e88d4e1857a3a31605c6d1333755f2dd17c [Mon Jul 26 12:20:50.085738000 2021] 116688 70e83e88d4e1857a3a31605c6d1333755f2dd17c [...]", "redis-cli --scan --pattern \"*USD(pmseries 'disk.dev.read')\" pcp:metric.name:series:2eb3e58d8f1e231361fb15cf1aa26fe534b4d9df pcp:values:series:2eb3e58d8f1e231361fb15cf1aa26fe534b4d9df pcp:desc:series:2eb3e58d8f1e231361fb15cf1aa26fe534b4d9df pcp:labelvalue:series:2eb3e58d8f1e231361fb15cf1aa26fe534b4d9df pcp:instances:series:2eb3e58d8f1e231361fb15cf1aa26fe534b4d9df pcp:labelflags:series:2eb3e58d8f1e231361fb15cf1aa26fe534b4d9df", "journalctl -e -u grafana-server -- Logs begin at Mon 2021-07-26 11:55:10 IST, end at Mon 2021-07-26 12:30:15 IST. -- Jul 26 11:55:17 localhost.localdomain systemd[1]: Starting Grafana instance Jul 26 11:55:17 localhost.localdomain grafana-server[1171]: t=2021-07-26T11:55:17+0530 lvl=info msg=\"Starting Grafana\" logger=server version=7.3.6 c> Jul 26 11:55:17 localhost.localdomain grafana-server[1171]: t=2021-07-26T11:55:17+0530 lvl=info msg=\"Config loaded from\" logger=settings file=/usr/s> Jul 26 11:55:17 localhost.localdomain grafana-server[1171]: t=2021-07-26T11:55:17+0530 lvl=info msg=\"Config loaded from\" logger=settings file=/etc/g> [...]" ]
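The troubleshooting checks in section 9.14 can also be run as a single pass. The following is a minimal shell sketch that only combines, with light adaptation, the commands already shown above; the metric name disk.dev.read is just an illustrative example and any other PCP metric can be substituted:

#!/bin/bash
# Combined health check for the PCP -> pmproxy -> Redis -> Grafana pipeline.
set -x
systemctl status pmlogger --no-pager                  # is the archive logger running?
ls -lrt /var/log/pcp/pmlogger/$(hostname)/            # are archive files being written?
systemctl status pmproxy --no-pager                   # is the REST/time series proxy running?
grep "Redis slots" /var/log/pcp/pmproxy/pmproxy.log   # did pmproxy initialize the Redis schema?
redis-cli dbsize                                      # does the Redis database contain any keys?
pmseries disk.dev.read                                # is an example metric discoverable as a time series?
journalctl -e -u grafana-server --no-pager            # any recent errors from the Grafana server?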
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/9/html/monitoring_and_managing_system_status_and_performance/setting-up-graphical-representation-of-pcp-metrics_monitoring-and-managing-system-status-and-performance
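Section 9.9 above notes that the bpftrace PMDA gathers metrics from bpftrace scripts, but no script is shown. The following is a generic bpftrace snippet of the kind that can be entered in the query editor of the PCP bpftrace data source in Grafana; the tracepoint and map name are only illustrative, and the exact registration details depend on your pcp-pmda-bpftrace version:

// Count openat() syscalls per process name; the per-comm counts are exposed as metric values.
tracepoint:syscalls:sys_enter_openat
{
  @opens[comm] = count();
}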
Chapter 9. Updating a cluster that includes RHEL compute machines
Chapter 9. Updating a cluster that includes RHEL compute machines You can update, or upgrade, an OpenShift Container Platform cluster. If your cluster contains Red Hat Enterprise Linux (RHEL) machines, you must perform more steps to update those machines. 9.1. Prerequisites Have access to the cluster as a user with admin privileges. See Using RBAC to define and apply permissions . Have a recent etcd backup in case your update fails and you must restore your cluster to a state . If your cluster uses manually maintained credentials, ensure that the Cloud Credential Operator (CCO) is in an upgradeable state. For more information, see Upgrading clusters with manually maintained credentials for AWS , Azure , or GCP . If your cluster uses manually maintained credentials with the AWS Security Token Service (STS), obtain a copy of the ccoctl utility from the release image being updated to and use it to process any updated credentials. For more information, see Upgrading an OpenShift Container Platform cluster configured for manual mode with STS . If you run an Operator or you have configured any application with the pod disruption budget, you might experience an interruption during the upgrade process. If minAvailable is set to 1 in PodDisruptionBudget , the nodes are drained to apply pending machine configs which might block the eviction process. If several nodes are rebooted, all the pods might run on only one node, and the PodDisruptionBudget field can prevent the node drain. Additional resources Support policy for unmanaged Operators 9.2. Updating a cluster by using the web console If updates are available, you can update your cluster from the web console. You can find information about available OpenShift Container Platform advisories and updates in the errata section of the Customer Portal. Prerequisites Have access to the web console as a user with admin privileges. Pause all MachineHealthCheck resources. Procedure From the web console, click Administration Cluster Settings and review the contents of the Details tab. For production clusters, ensure that the Channel is set to the correct channel for the version that you want to update to, such as stable-4.9 . Important For production clusters, you must subscribe to a stable-* , eus-* or fast-* channel. If the Update status is not Updates available , you cannot upgrade your cluster. Select channel indicates the cluster version that your cluster is running or is updating to. Select a version to update to and click Save . The Input channel Update status changes to Update to <product-version> in progress , and you can review the progress of the cluster update by watching the progress bars for the Operators and nodes. Note If you are upgrading your cluster to the minor version, like version 4.y to 4.(y+1), it is recommended to confirm your nodes are upgraded before deploying workloads that rely on a new feature. Any pools with worker nodes that are not yet updated are displayed on the Cluster Settings page. After the update completes and the Cluster Version Operator refreshes the available updates, check if more updates are available in your current channel. If updates are available, continue to perform updates in the current channel until you can no longer update. If no updates are available, change the Channel to the stable-* , eus-* or fast-* channel for the minor version, and update to the version that you want in that channel. You might need to perform several intermediate updates until you reach the version that you want. 
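The procedure above drives the update from the web console. As a rough CLI counterpart, and to illustrate the "Pause all MachineHealthCheck resources" prerequisite, the following sketch may help; the resource name example-mhc and the target version are placeholders, and the pause annotation shown is the one commonly used for OpenShift MachineHealthCheck resources, so verify it against your cluster version before relying on it:

# Pause a MachineHealthCheck so automatic remediation does not interfere (name is a placeholder)
oc -n openshift-machine-api annotate mhc example-mhc cluster.x-k8s.io/paused=""
# Review the current version, channel, and available updates
oc get clusterversion
oc adm upgrade
# Start the update to a specific version (placeholder version)
oc adm upgrade --to=4.9.0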
Note When you update a cluster that contains Red Hat Enterprise Linux (RHEL) worker machines, those workers temporarily become unavailable during the update process. You must run the upgrade playbook against each RHEL machine as it enters the NotReady state for the cluster to finish updating. 9.3. Optional: Adding hooks to perform Ansible tasks on RHEL machines You can use hooks to run Ansible tasks on the RHEL compute machines during the OpenShift Container Platform update. 9.3.1. About Ansible hooks for upgrades When you update OpenShift Container Platform, you can run custom tasks on your Red Hat Enterprise Linux (RHEL) nodes during specific operations by using hooks . Hooks allow you to provide files that define tasks to run before or after specific update tasks. You can use hooks to validate or modify custom infrastructure when you update the RHEL compute nodes in you OpenShift Container Platform cluster. Because when a hook fails, the operation fails, you must design hooks that are idempotent, or can run multiple times and provide the same results. Hooks have the following important limitations: - Hooks do not have a defined or versioned interface. They can use internal openshift-ansible variables, but it is possible that the variables will be modified or removed in future OpenShift Container Platform releases. - Hooks do not have error handling, so an error in a hook halts the update process. If you get an error, you must address the problem and then start the upgrade again. 9.3.2. Configuring the Ansible inventory file to use hooks You define the hooks to use when you update the Red Hat Enterprise Linux (RHEL) compute machines, which are also known as worker machines, in the hosts inventory file under the all:vars section. Prerequisites You have access to the machine that you used to add the RHEL compute machines cluster. You must have access to the hosts Ansible inventory file that defines your RHEL machines. Procedure After you design the hook, create a YAML file that defines the Ansible tasks for it. This file must be a set of tasks and cannot be a playbook, as shown in the following example: --- # Trivial example forcing an operator to acknowledge the start of an upgrade # file=/home/user/openshift-ansible/hooks/pre_compute.yml - name: note the start of a compute machine update debug: msg: "Compute machine upgrade of {{ inventory_hostname }} is about to start" - name: require the user agree to start an upgrade pause: prompt: "Press Enter to start the compute machine update" Modify the hosts Ansible inventory file to specify the hook files. The hook files are specified as parameter values in the [all:vars] section, as shown: Example hook definitions in an inventory file To avoid ambiguity in the paths to the hook, use absolute paths instead of a relative paths in their definitions. 9.3.3. Available hooks for RHEL compute machines You can use the following hooks when you update the Red Hat Enterprise Linux (RHEL) compute machines in your OpenShift Container Platform cluster. Hook name Description openshift_node_pre_cordon_hook Runs before each node is cordoned. This hook runs against each node in serial. If a task must run against a different host, the task must use delegate_to or local_action . openshift_node_pre_upgrade_hook Runs after each node is cordoned but before it is updated. This hook runs against each node in serial. If a task must run against a different host, the task must use delegate_to or local_action . 
openshift_node_pre_uncordon_hook Runs after each node is updated but before it is uncordoned. This hook runs against each node in serial. If a task must run against a different host, they task must use delegate_to or local_action . openshift_node_post_upgrade_hook Runs after each node uncordoned. It is the last node update action. This hook runs against each node in serial. If a task must run against a different host, the task must use delegate_to or local_action . 9.4. Updating RHEL compute machines in your cluster After you update your cluster, you must update the Red Hat Enterprise Linux (RHEL) compute machines in your cluster. Important Red Hat Enterprise Linux (RHEL) version 8.4 and version 8.5 is supported for RHEL worker (compute) machines. You can also update your compute machines to another minor version of OpenShift Container Platform if you are using RHEL as the operating system. You do not need to exclude any RPM packages from RHEL when performing a minor version update. Important You cannot upgrade RHEL 7 compute machines to RHEL 8. You must deploy new RHEL 8 hosts, and the old RHEL 7 hosts should be removed. Prerequisites You updated your cluster. Important Because the RHEL machines require assets that are generated by the cluster to complete the update process, you must update the cluster before you update the RHEL worker machines in it. You have access to the local machine that you used to add the RHEL compute machines to your cluster. You must have access to the hosts Ansible inventory file that defines your RHEL machines and the upgrade playbook. For updates to a minor version, the RPM repository is using the same version of OpenShift Container Platform that is running on your cluster. Procedure Stop and disable firewalld on the host: # systemctl disable --now firewalld.service Note By default, the base OS RHEL with "Minimal" installation option enables firewalld serivce. Having the firewalld service enabled on your host prevents you from accessing OpenShift Container Platform logs on the worker. Do not enable firewalld later if you wish to continue accessing OpenShift Container Platform logs on the worker. Enable the repositories that are required for OpenShift Container Platform 4.9: On the machine that you run the Ansible playbooks, update the required repositories: # subscription-manager repos --disable=rhel-7-server-ose-4.8-rpms \ --enable=rhel-7-server-ansible-2.9-rpms \ --enable=rhel-7-server-ose-4.9-rpms On the machine that you run the Ansible playbooks, update the required packages, including openshift-ansible : # yum update openshift-ansible openshift-clients On each RHEL compute node, update the required repositories: # subscription-manager repos --disable=rhel-7-server-ose-4.8-rpms \ --enable=rhel-7-server-ose-4.9-rpms \ --enable=rhel-7-fast-datapath-rpms \ --enable=rhel-7-server-optional-rpms Update a RHEL worker machine: Review the current node status to determine which RHEL worker to update: # oc get node Example output NAME STATUS ROLES AGE VERSION mycluster-control-plane-0 Ready master 145m v1.22.1 mycluster-control-plane-1 Ready master 145m v1.22.1 mycluster-control-plane-2 Ready master 145m v1.22.1 mycluster-rhel7-0 NotReady,SchedulingDisabled worker 98m v1.22.1 mycluster-rhel7-1 Ready worker 98m v1.22.1 mycluster-rhel7-2 Ready worker 98m v1.22.1 mycluster-rhel7-3 Ready worker 98m v1.22.1 Note which machine has the NotReady,SchedulingDisabled status. 
Review your Ansible inventory file at /<path>/inventory/hosts and update its contents so that only the machine with the NotReady,SchedulingDisabled status is listed in the [workers] section, as shown in the following example: Change to the openshift-ansible directory: USD cd /usr/share/ansible/openshift-ansible Run the upgrade playbook: USD ansible-playbook -i /<path>/inventory/hosts playbooks/upgrade.yml 1 1 For <path> , specify the path to the Ansible inventory file that you created. Note The upgrade playbook only upgrades the OpenShift Container Platform packages. It does not update the operating system packages. Follow the process in the step to update each RHEL worker machine in your cluster. After you update all of the workers, confirm that all of your cluster nodes have updated to the new version: # oc get node Example output NAME STATUS ROLES AGE VERSION mycluster-control-plane-0 Ready master 145m v1.22.1 mycluster-control-plane-1 Ready master 145m v1.22.1 mycluster-control-plane-2 Ready master 145m v1.22.1 mycluster-rhel7-0 NotReady,SchedulingDisabled worker 98m v1.22.1 mycluster-rhel7-1 Ready worker 98m v1.22.1 mycluster-rhel7-2 Ready worker 98m v1.22.1 mycluster-rhel7-3 Ready worker 98m v1.22.1 Optional: Update the operating system packages that were not updated by the upgrade playbook. To update packages that are not on 4.9, use the following command: # yum update Note You do not need to exclude RPM packages if you are using the same RPM repository that you used when you installed 4.9.
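Section 9.3 above defines the available hook variables but only shows a pre-upgrade example. As a complement, the following is a hypothetical post-upgrade hook; the file name post_node.yml and the kubelet service check are assumptions rather than part of the official playbooks, and as required by the hook mechanism the file is a plain task list, not a playbook:

---
# file=/home/user/openshift-ansible/hooks/post_node.yml (hypothetical path)
- name: gather service facts on the node that was just updated
  service_facts:

- name: fail fast if kubelet did not come back after the update
  assert:
    that:
      - "ansible_facts.services['kubelet.service'].state == 'running'"
    fail_msg: "kubelet is not running on {{ inventory_hostname }}"

Reference such a file in the inventory with openshift_node_post_upgrade_hook=/home/user/openshift-ansible/hooks/post_node.yml, using an absolute path as recommended in section 9.3.2.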
[ "--- Trivial example forcing an operator to acknowledge the start of an upgrade file=/home/user/openshift-ansible/hooks/pre_compute.yml - name: note the start of a compute machine update debug: msg: \"Compute machine upgrade of {{ inventory_hostname }} is about to start\" - name: require the user agree to start an upgrade pause: prompt: \"Press Enter to start the compute machine update\"", "[all:vars] openshift_node_pre_upgrade_hook=/home/user/openshift-ansible/hooks/pre_node.yml openshift_node_post_upgrade_hook=/home/user/openshift-ansible/hooks/post_node.yml", "systemctl disable --now firewalld.service", "subscription-manager repos --disable=rhel-7-server-ose-4.8-rpms --enable=rhel-7-server-ansible-2.9-rpms --enable=rhel-7-server-ose-4.9-rpms", "yum update openshift-ansible openshift-clients", "subscription-manager repos --disable=rhel-7-server-ose-4.8-rpms --enable=rhel-7-server-ose-4.9-rpms --enable=rhel-7-fast-datapath-rpms --enable=rhel-7-server-optional-rpms", "oc get node", "NAME STATUS ROLES AGE VERSION mycluster-control-plane-0 Ready master 145m v1.22.1 mycluster-control-plane-1 Ready master 145m v1.22.1 mycluster-control-plane-2 Ready master 145m v1.22.1 mycluster-rhel7-0 NotReady,SchedulingDisabled worker 98m v1.22.1 mycluster-rhel7-1 Ready worker 98m v1.22.1 mycluster-rhel7-2 Ready worker 98m v1.22.1 mycluster-rhel7-3 Ready worker 98m v1.22.1", "[all:vars] ansible_user=root #ansible_become=True openshift_kubeconfig_path=\"~/.kube/config\" [workers] mycluster-rhel7-0.example.com", "cd /usr/share/ansible/openshift-ansible", "ansible-playbook -i /<path>/inventory/hosts playbooks/upgrade.yml 1", "oc get node", "NAME STATUS ROLES AGE VERSION mycluster-control-plane-0 Ready master 145m v1.22.1 mycluster-control-plane-1 Ready master 145m v1.22.1 mycluster-control-plane-2 Ready master 145m v1.22.1 mycluster-rhel7-0 NotReady,SchedulingDisabled worker 98m v1.22.1 mycluster-rhel7-1 Ready worker 98m v1.22.1 mycluster-rhel7-2 Ready worker 98m v1.22.1 mycluster-rhel7-3 Ready worker 98m v1.22.1", "yum update" ]
https://docs.redhat.com/en/documentation/openshift_container_platform/4.9/html/updating_clusters/updating-cluster-rhel-compute
Chapter 48. Paho MQTT 5
Chapter 48. Paho MQTT 5 Both producer and consumer are supported Paho MQTT5 component provides connector for the MQTT messaging protocol using the Eclipse Paho library with MQTT v5. Paho is one of the most popular MQTT libraries, so if you would like to integrate it with your Java project - Camel Paho connector is a way to go. Maven users will need to add the following dependency to their pom.xml for this component: <dependency> <groupId>org.apache.camel</groupId> <artifactId>camel-paho-mqtt5</artifactId> <version>{CamelSBVersion}</version> <!-- use the same version as your Camel core version --> </dependency> 48.1. URI format Where topic is the name of the topic. 48.2. Configuring Options Camel components are configured on two separate levels: component level endpoint level 48.2.1. Configuring Component Options The component level is the highest level which holds general and common configurations that are inherited by the endpoints. For example a component may have security settings, credentials for authentication, urls for network connection and so forth. Some components only have a few options, and others may have many. Because components typically have pre configured defaults that are commonly used, then you may often only need to configure a few options on a component; or none at all. Configuring components can be done with the Component DSL , in a configuration file (application.properties|yaml), or directly with Java code. 48.2.2. Configuring Endpoint Options Where you find yourself configuring the most is on endpoints, as endpoints often have many options, which allows you to configure what you need the endpoint to do. The options are also categorized into whether the endpoint is used as consumer (from) or as a producer (to), or used for both. Configuring endpoints is most often done directly in the endpoint URI as path and query parameters. You can also use the Endpoint DSL as a type safe way of configuring endpoints. A good practice when configuring options is to use Property Placeholders , which allows to not hardcode urls, port numbers, sensitive information, and other settings. In other words placeholders allows to externalize the configuration from your code, and gives more flexibility and reuse. The following two sections lists all the options, firstly for the component followed by the endpoint. 48.3. Component Options The Paho MQTT 5 component supports 32 options, which are listed below. Name Description Default Type automaticReconnect (common) Sets whether the client will automatically attempt to reconnect to the server if the connection is lost. If set to false, the client will not attempt to automatically reconnect to the server in the event that the connection is lost. If set to true, in the event that the connection is lost, the client will attempt to reconnect to the server. It will initially wait 1 second before it attempts to reconnect, for every failed reconnect attempt, the delay will double until it is at 2 minutes at which point the delay will stay at 2 minutes. true boolean brokerUrl (common) The URL of the MQTT broker. tcp://localhost:1883 String cleanStart (common) Sets whether the client and server should remember state across restarts and reconnects. If set to false both the client and server will maintain state across restarts of the client, the server and the connection. As state is maintained: Message delivery will be reliable meeting the specified QOS even if the client, server or connection are restarted. 
The server will treat a subscription as durable. If set to true the client and server will not maintain state across restarts of the client, the server or the connection. This means Message delivery to the specified QOS cannot be maintained if the client, server or connection are restarted The server will treat a subscription as non-durable. true boolean clientId (common) MQTT client identifier. The identifier must be unique. String configuration (common) To use the shared Paho configuration. PahoMqtt5Configuration connectionTimeout (common) Sets the connection timeout value. This value, measured in seconds, defines the maximum time interval the client will wait for the network connection to the MQTT server to be established. The default timeout is 30 seconds. A value of 0 disables timeout processing meaning the client will wait until the network connection is made successfully or fails. 30 int filePersistenceDirectory (common) Base directory used by file persistence. Will by default use user directory. String keepAliveInterval (common) Sets the keep alive interval. This value, measured in seconds, defines the maximum time interval between messages sent or received. It enables the client to detect if the server is no longer available, without having to wait for the TCP/IP timeout. The client will ensure that at least one message travels across the network within each keep alive period. In the absence of a data-related message during the time period, the client sends a very small ping message, which the server will acknowledge. A value of 0 disables keepalive processing in the client. The default value is 60 seconds. 60 int maxReconnectDelay (common) Get the maximum time (in millis) to wait between reconnects. 128000 int persistence (common) Client persistence to be used - memory or file. Enum values: FILE MEMORY MEMORY PahoMqtt5Persistence qos (common) Client quality of service level (0-2). 2 int receiveMaximum (common) Sets the Receive Maximum. This value represents the limit of QoS 1 and QoS 2 publications that the client is willing to process concurrently. There is no mechanism to limit the number of QoS 0 publications that the Server might try to send. The default value is 65535. 65535 int retained (common) Retain option. false boolean serverURIs (common) Set a list of one or more serverURIs the client may connect to. Multiple servers can be separated by comma. Each serverURI specifies the address of a server that the client may connect to. Two types of connection are supported tcp:// for a TCP connection and ssl:// for a TCP connection secured by SSL/TLS. For example: tcp://localhost:1883 ssl://localhost:8883 If the port is not specified, it will default to 1883 for tcp:// URIs, and 8883 for ssl:// URIs. If serverURIs is set then it overrides the serverURI parameter passed in on the constructor of the MQTT client. When an attempt to connect is initiated the client will start with the first serverURI in the list and work through the list until a connection is established with a server. If a connection cannot be made to any of the servers then the connect attempt fails. Specifying a list of servers that a client may connect to has several uses: High Availability and reliable message delivery Some MQTT servers support a high availability feature where two or more equal MQTT servers share state. An MQTT client can connect to any of the equal servers and be assured that messages are reliably delivered and durable subscriptions are maintained no matter which server the client connects to. 
The cleansession flag must be set to false if durable subscriptions and/or reliable message delivery is required. Hunt List A set of servers may be specified that are not equal (as in the high availability option). As no state is shared across the servers reliable message delivery and durable subscriptions are not valid. The cleansession flag must be set to true if the hunt list mode is used. String sessionExpiryInterval (common) Sets the Session Expiry Interval. This value, measured in seconds, defines the maximum time that the broker will maintain the session for once the client disconnects. Clients should only connect with a long Session Expiry interval if they intend to connect to the server at some later point in time. By default this value is -1 and so will not be sent, in this case, the session will not expire. If a 0 is sent, the session will end immediately once the Network Connection is closed. When the client has determined that it has no longer any use for the session, it should disconnect with a Session Expiry Interval set to 0. -1 long willMqttProperties (common) Sets the Last Will and Testament (LWT) for the connection. In the event that this client unexpectedly loses its connection to the server, the server will publish a message to itself using the supplied details. The MQTT properties set for the message. MqttProperties willPayload (common) Sets the Last Will and Testament (LWT) for the connection. In the event that this client unexpectedly loses its connection to the server, the server will publish a message to itself using the supplied details. The byte payload for the message. String willQos (common) Sets the Last Will and Testament (LWT) for the connection. In the event that this client unexpectedly loses its connection to the server, the server will publish a message to itself using the supplied details. The quality of service to publish the message at (0, 1 or 2). 1 int willRetained (common) Sets the Last Will and Testament (LWT) for the connection. In the event that this client unexpectedly loses its connection to the server, the server will publish a message to itself using the supplied details. Whether or not the message should be retained. false boolean willTopic (common) Sets the Last Will and Testament (LWT) for the connection. In the event that this client unexpectedly loses its connection to the server, the server will publish a message to itself using the supplied details. The topic to publish to. String bridgeErrorHandler (consumer) Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. false boolean lazyStartProducer (producer) Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. 
false boolean autowiredEnabled (advanced) Whether autowiring is enabled. This is used for automatic autowiring options (the option must be marked as autowired) by looking up in the registry to find if there is a single instance of matching type, which then gets configured on the component. This can be used for automatic configuring JDBC data sources, JMS connection factories, AWS Clients, etc. true boolean client (advanced) To use a shared Paho client. MqttClient customWebSocketHeaders (advanced) Sets the Custom WebSocket Headers for the WebSocket Connection. Map executorServiceTimeout (advanced) Set the time in seconds that the executor service should wait when terminating before forcefully terminating. It is not recommended to change this value unless you are absolutely sure that you need to. 1 int httpsHostnameVerificationEnabled (security) Whether SSL HostnameVerifier is enabled or not. The default value is true. true boolean password (security) Password to be used for authentication against the MQTT broker. String socketFactory (security) Sets the SocketFactory to use. This allows an application to apply its own policies around the creation of network sockets. If using an SSL connection, an SSLSocketFactory can be used to supply application-specific security settings. SocketFactory sslClientProps (security) Sets the SSL properties for the connection. Note that these properties are only valid if an implementation of the Java Secure Socket Extensions (JSSE) is available. These properties are not used if a custom SocketFactory has been set. The following properties can be used: com.ibm.ssl.protocol One of: SSL, SSLv3, TLS, TLSv1, SSL_TLS. com.ibm.ssl.contextProvider Underlying JSSE provider. For example IBMJSSE2 or SunJSSE com.ibm.ssl.keyStore The name of the file that contains the KeyStore object that you want the KeyManager to use. For example /mydir/etc/key.p12 com.ibm.ssl.keyStorePassword The password for the KeyStore object that you want the KeyManager to use. The password can either be in plain-text, or may be obfuscated using the static method: com.ibm.micro.security.Password.obfuscate(char password). This obfuscates the password using a simple and insecure XOR and Base64 encoding mechanism. Note that this is only a simple scrambler to obfuscate clear-text passwords. com.ibm.ssl.keyStoreType Type of key store, for example PKCS12, JKS, or JCEKS. com.ibm.ssl.keyStoreProvider Key store provider, for example IBMJCE or IBMJCEFIPS. com.ibm.ssl.trustStore The name of the file that contains the KeyStore object that you want the TrustManager to use. com.ibm.ssl.trustStorePassword The password for the TrustStore object that you want the TrustManager to use. The password can either be in plain-text, or may be obfuscated using the static method: com.ibm.micro.security.Password.obfuscate(char password). This obfuscates the password using a simple and insecure XOR and Base64 encoding mechanism. Note that this is only a simple scrambler to obfuscate clear-text passwords. com.ibm.ssl.trustStoreType The type of KeyStore object that you want the default TrustManager to use. Same possible values as keyStoreType. com.ibm.ssl.trustStoreProvider Trust store provider, for example IBMJCE or IBMJCEFIPS. com.ibm.ssl.enabledCipherSuites A list of which ciphers are enabled. Values are dependent on the provider, for example: SSL_RSA_WITH_AES_128_CBC_SHA;SSL_RSA_WITH_3DES_EDE_CBC_SHA. 
com.ibm.ssl.keyManager Sets the algorithm that will be used to instantiate a KeyManagerFactory object instead of using the default algorithm available in the platform. Example values: IbmX509 or IBMJ9X509. com.ibm.ssl.trustManager Sets the algorithm that will be used to instantiate a TrustManagerFactory object instead of using the default algorithm available in the platform. Example values: PKIX or IBMJ9X509. Properties sslHostnameVerifier (security) Sets the HostnameVerifier for the SSL connection. Note that it will be used after handshake on a connection and you should do actions by yourself when hostname is verified error. There is no default HostnameVerifier. HostnameVerifier userName (security) Username to be used for authentication against the MQTT broker. String 48.4. Endpoint Options The Paho MQTT 5 endpoint is configured using URI syntax: with the following path and query parameters: 48.4.1. Path Parameters (1 parameters) Name Description Default Type topic (common) Required Name of the topic. String 48.4.2. Query Parameters (32 parameters) Name Description Default Type automaticReconnect (common) Sets whether the client will automatically attempt to reconnect to the server if the connection is lost. If set to false, the client will not attempt to automatically reconnect to the server in the event that the connection is lost. If set to true, in the event that the connection is lost, the client will attempt to reconnect to the server. It will initially wait 1 second before it attempts to reconnect, for every failed reconnect attempt, the delay will double until it is at 2 minutes at which point the delay will stay at 2 minutes. true boolean brokerUrl (common) The URL of the MQTT broker. tcp://localhost:1883 String cleanStart (common) Sets whether the client and server should remember state across restarts and reconnects. If set to false both the client and server will maintain state across restarts of the client, the server and the connection. As state is maintained: Message delivery will be reliable meeting the specified QOS even if the client, server or connection are restarted. The server will treat a subscription as durable. If set to true the client and server will not maintain state across restarts of the client, the server or the connection. This means Message delivery to the specified QOS cannot be maintained if the client, server or connection are restarted The server will treat a subscription as non-durable. true boolean clientId (common) MQTT client identifier. The identifier must be unique. String connectionTimeout (common) Sets the connection timeout value. This value, measured in seconds, defines the maximum time interval the client will wait for the network connection to the MQTT server to be established. The default timeout is 30 seconds. A value of 0 disables timeout processing meaning the client will wait until the network connection is made successfully or fails. 30 int filePersistenceDirectory (common) Base directory used by file persistence. Will by default use user directory. String keepAliveInterval (common) Sets the keep alive interval. This value, measured in seconds, defines the maximum time interval between messages sent or received. It enables the client to detect if the server is no longer available, without having to wait for the TCP/IP timeout. The client will ensure that at least one message travels across the network within each keep alive period. 
In the absence of a data-related message during the time period, the client sends a very small ping message, which the server will acknowledge. A value of 0 disables keepalive processing in the client. The default value is 60 seconds. 60 int maxReconnectDelay (common) Get the maximum time (in millis) to wait between reconnects. 128000 int persistence (common) Client persistence to be used - memory or file. Enum values: FILE MEMORY MEMORY PahoMqtt5Persistence qos (common) Client quality of service level (0-2). 2 int receiveMaximum (common) Sets the Receive Maximum. This value represents the limit of QoS 1 and QoS 2 publications that the client is willing to process concurrently. There is no mechanism to limit the number of QoS 0 publications that the Server might try to send. The default value is 65535. 65535 int retained (common) Retain option. false boolean serverURIs (common) Set a list of one or more serverURIs the client may connect to. Multiple servers can be separated by comma. Each serverURI specifies the address of a server that the client may connect to. Two types of connection are supported tcp:// for a TCP connection and ssl:// for a TCP connection secured by SSL/TLS. For example: tcp://localhost:1883 ssl://localhost:8883 If the port is not specified, it will default to 1883 for tcp:// URIs, and 8883 for ssl:// URIs. If serverURIs is set then it overrides the serverURI parameter passed in on the constructor of the MQTT client. When an attempt to connect is initiated the client will start with the first serverURI in the list and work through the list until a connection is established with a server. If a connection cannot be made to any of the servers then the connect attempt fails. Specifying a list of servers that a client may connect to has several uses: High Availability and reliable message delivery Some MQTT servers support a high availability feature where two or more equal MQTT servers share state. An MQTT client can connect to any of the equal servers and be assured that messages are reliably delivered and durable subscriptions are maintained no matter which server the client connects to. The cleansession flag must be set to false if durable subscriptions and/or reliable message delivery is required. Hunt List A set of servers may be specified that are not equal (as in the high availability option). As no state is shared across the servers reliable message delivery and durable subscriptions are not valid. The cleansession flag must be set to true if the hunt list mode is used. String sessionExpiryInterval (common) Sets the Session Expiry Interval. This value, measured in seconds, defines the maximum time that the broker will maintain the session for once the client disconnects. Clients should only connect with a long Session Expiry interval if they intend to connect to the server at some later point in time. By default this value is -1 and so will not be sent, in this case, the session will not expire. If a 0 is sent, the session will end immediately once the Network Connection is closed. When the client has determined that it has no longer any use for the session, it should disconnect with a Session Expiry Interval set to 0. -1 long willMqttProperties (common) Sets the Last Will and Testament (LWT) for the connection. In the event that this client unexpectedly loses its connection to the server, the server will publish a message to itself using the supplied details. The MQTT properties set for the message. 
MqttProperties willPayload (common) Sets the Last Will and Testament (LWT) for the connection. In the event that this client unexpectedly loses its connection to the server, the server will publish a message to itself using the supplied details. The byte payload for the message. String willQos (common) Sets the Last Will and Testament (LWT) for the connection. In the event that this client unexpectedly loses its connection to the server, the server will publish a message to itself using the supplied details. The quality of service to publish the message at (0, 1 or 2). 1 int willRetained (common) Sets the Last Will and Testament (LWT) for the connection. In the event that this client unexpectedly loses its connection to the server, the server will publish a message to itself using the supplied details. Whether or not the message should be retained. false boolean willTopic (common) Sets the Last Will and Testament (LWT) for the connection. In the event that this client unexpectedly loses its connection to the server, the server will publish a message to itself using the supplied details. The topic to publish to. String bridgeErrorHandler (consumer) Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. false boolean exceptionHandler (consumer (advanced)) To let the consumer use a custom ExceptionHandler. Notice if the option bridgeErrorHandler is enabled then this option is not in use. By default the consumer will deal with exceptions, that will be logged at WARN or ERROR level and ignored. ExceptionHandler exchangePattern (consumer (advanced)) Sets the exchange pattern when the consumer creates an exchange. Enum values: InOnly InOut InOptionalOut ExchangePattern lazyStartProducer (producer) Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. false boolean client (advanced) To use an existing mqtt client. MqttClient customWebSocketHeaders (advanced) Sets the Custom WebSocket Headers for the WebSocket Connection. Map executorServiceTimeout (advanced) Set the time in seconds that the executor service should wait when terminating before forcefully terminating. It is not recommended to change this value unless you are absolutely sure that you need to. 1 int httpsHostnameVerificationEnabled (security) Whether SSL HostnameVerifier is enabled or not. The default value is true. true boolean password (security) Password to be used for authentication against the MQTT broker. String socketFactory (security) Sets the SocketFactory to use. This allows an application to apply its own policies around the creation of network sockets. If using an SSL connection, an SSLSocketFactory can be used to supply application-specific security settings. 
SocketFactory sslClientProps (security) Sets the SSL properties for the connection. Note that these properties are only valid if an implementation of the Java Secure Socket Extensions (JSSE) is available. These properties are not used if a custom SocketFactory has been set. The following properties can be used: com.ibm.ssl.protocol One of: SSL, SSLv3, TLS, TLSv1, SSL_TLS. com.ibm.ssl.contextProvider Underlying JSSE provider. For example IBMJSSE2 or SunJSSE com.ibm.ssl.keyStore The name of the file that contains the KeyStore object that you want the KeyManager to use. For example /mydir/etc/key.p12 com.ibm.ssl.keyStorePassword The password for the KeyStore object that you want the KeyManager to use. The password can either be in plain-text, or may be obfuscated using the static method: com.ibm.micro.security.Password.obfuscate(char password). This obfuscates the password using a simple and insecure XOR and Base64 encoding mechanism. Note that this is only a simple scrambler to obfuscate clear-text passwords. com.ibm.ssl.keyStoreType Type of key store, for example PKCS12, JKS, or JCEKS. com.ibm.ssl.keyStoreProvider Key store provider, for example IBMJCE or IBMJCEFIPS. com.ibm.ssl.trustStore The name of the file that contains the KeyStore object that you want the TrustManager to use. com.ibm.ssl.trustStorePassword The password for the TrustStore object that you want the TrustManager to use. The password can either be in plain-text, or may be obfuscated using the static method: com.ibm.micro.security.Password.obfuscate(char password). This obfuscates the password using a simple and insecure XOR and Base64 encoding mechanism. Note that this is only a simple scrambler to obfuscate clear-text passwords. com.ibm.ssl.trustStoreType The type of KeyStore object that you want the default TrustManager to use. Same possible values as keyStoreType. com.ibm.ssl.trustStoreProvider Trust store provider, for example IBMJCE or IBMJCEFIPS. com.ibm.ssl.enabledCipherSuites A list of which ciphers are enabled. Values are dependent on the provider, for example: SSL_RSA_WITH_AES_128_CBC_SHA;SSL_RSA_WITH_3DES_EDE_CBC_SHA. com.ibm.ssl.keyManager Sets the algorithm that will be used to instantiate a KeyManagerFactory object instead of using the default algorithm available in the platform. Example values: IbmX509 or IBMJ9X509. com.ibm.ssl.trustManager Sets the algorithm that will be used to instantiate a TrustManagerFactory object instead of using the default algorithm available in the platform. Example values: PKIX or IBMJ9X509. Properties sslHostnameVerifier (security) Sets the HostnameVerifier for the SSL connection. Note that it will be used after handshake on a connection and you should do actions by yourself when hostname is verified error. There is no default HostnameVerifier. HostnameVerifier userName (security) Username to be used for authentication against the MQTT broker. String 48.5. Headers The following headers are recognized by the Paho component: Header Java constant Endpoint type Value type Description CamelMqttTopic PahoConstants.MQTT_TOPIC Consumer String The name of the topic CamelMqttQoS PahoConstants.MQTT_QOS Consumer Integer QualityOfService of the incoming message CamelPahoOverrideTopic PahoConstants.CAMEL_PAHO_OVERRIDE_TOPIC Producer String Name of topic to override and send to instead of topic specified on endpoint 48.6. 
Default payload type By default Camel Paho component operates on the binary payloads extracted out of (or put into) the MQTT message: // Receive payload byte[] payload = (byte[]) consumerTemplate.receiveBody("paho:topic"); // Send payload byte[] payload = "message".getBytes(); producerTemplate.sendBody("paho:topic", payload); But of course Camel build-in type conversion API can perform the automatic data type transformations for you. In the example below Camel automatically converts binary payload into String (and conversely): // Receive payload String payload = consumerTemplate.receiveBody("paho:topic", String.class); // Send payload String payload = "message"; producerTemplate.sendBody("paho:topic", payload); 48.7. Samples For example the following snippet reads messages from the MQTT broker installed on the same host as the Camel router: from("paho:some/queue") .to("mock:test"); While the snippet below sends message to the MQTT broker: from("direct:test") .to("paho:some/target/queue"); For example this is how to read messages from the remote MQTT broker: from("paho:some/queue?brokerUrl=tcp://iot.eclipse.org:1883") .to("mock:test"); And here we override the default topic and set to a dynamic topic from("direct:test") .setHeader(PahoConstants.CAMEL_PAHO_OVERRIDE_TOPIC, simple("USD{header.customerId}")) .to("paho:some/target/queue"); 48.8. Spring Boot Auto-Configuration When using paho-mqtt5 with Spring Boot make sure to use the following Maven dependency to have support for auto configuration: <dependency> <groupId>org.apache.camel.springboot</groupId> <artifactId>camel-paho-mqtt5-starter</artifactId> </dependency> The component supports 33 options, which are listed below. Name Description Default Type camel.component.paho-mqtt5.automatic-reconnect Sets whether the client will automatically attempt to reconnect to the server if the connection is lost. If set to false, the client will not attempt to automatically reconnect to the server in the event that the connection is lost. If set to true, in the event that the connection is lost, the client will attempt to reconnect to the server. It will initially wait 1 second before it attempts to reconnect, for every failed reconnect attempt, the delay will double until it is at 2 minutes at which point the delay will stay at 2 minutes. true Boolean camel.component.paho-mqtt5.autowired-enabled Whether autowiring is enabled. This is used for automatic autowiring options (the option must be marked as autowired) by looking up in the registry to find if there is a single instance of matching type, which then gets configured on the component. This can be used for automatic configuring JDBC data sources, JMS connection factories, AWS Clients, etc. true Boolean camel.component.paho-mqtt5.bridge-error-handler Allows for bridging the consumer to the Camel routing Error Handler, which mean any exceptions occurred while the consumer is trying to pickup incoming messages, or the likes, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, that will be logged at WARN or ERROR level and ignored. false Boolean camel.component.paho-mqtt5.broker-url The URL of the MQTT broker. tcp://localhost:1883 String camel.component.paho-mqtt5.clean-start Sets whether the client and server should remember state across restarts and reconnects. 
If set to false both the client and server will maintain state across restarts of the client, the server and the connection. As state is maintained: Message delivery will be reliable meeting the specified QOS even if the client, server or connection are restarted. The server will treat a subscription as durable. If set to true the client and server will not maintain state across restarts of the client, the server or the connection. This means Message delivery to the specified QOS cannot be maintained if the client, server or connection are restarted The server will treat a subscription as non-durable. true Boolean camel.component.paho-mqtt5.client To use a shared Paho client. The option is a org.eclipse.paho.mqttv5.client.MqttClient type. MqttClient camel.component.paho-mqtt5.client-id MQTT client identifier. The identifier must be unique. String camel.component.paho-mqtt5.configuration To use the shared Paho configuration. The option is a org.apache.camel.component.paho.mqtt5.PahoMqtt5Configuration type. PahoMqtt5Configuration camel.component.paho-mqtt5.connection-timeout Sets the connection timeout value. This value, measured in seconds, defines the maximum time interval the client will wait for the network connection to the MQTT server to be established. The default timeout is 30 seconds. A value of 0 disables timeout processing meaning the client will wait until the network connection is made successfully or fails. 30 Integer camel.component.paho-mqtt5.custom-web-socket-headers Sets the Custom WebSocket Headers for the WebSocket Connection. Map camel.component.paho-mqtt5.enabled Whether to enable auto configuration of the paho-mqtt5 component. This is enabled by default. Boolean camel.component.paho-mqtt5.executor-service-timeout Set the time in seconds that the executor service should wait when terminating before forcefully terminating. It is not recommended to change this value unless you are absolutely sure that you need to. 1 Integer camel.component.paho-mqtt5.file-persistence-directory Base directory used by file persistence. Will by default use user directory. String camel.component.paho-mqtt5.https-hostname-verification-enabled Whether SSL HostnameVerifier is enabled or not. The default value is true. true Boolean camel.component.paho-mqtt5.keep-alive-interval Sets the keep alive interval. This value, measured in seconds, defines the maximum time interval between messages sent or received. It enables the client to detect if the server is no longer available, without having to wait for the TCP/IP timeout. The client will ensure that at least one message travels across the network within each keep alive period. In the absence of a data-related message during the time period, the client sends a very small ping message, which the server will acknowledge. A value of 0 disables keepalive processing in the client. The default value is 60 seconds. 60 Integer camel.component.paho-mqtt5.lazy-start-producer Whether the producer should be started lazy (on the first message). By starting lazy you can use this to allow CamelContext and routes to startup in situations where a producer may otherwise fail during starting and cause the route to fail being started. By deferring this startup to be lazy then the startup failure can be handled during routing messages via Camel's routing error handlers. Beware that when the first message is processed then creating and starting the producer may take a little time and prolong the total processing time of the processing. 
false Boolean camel.component.paho-mqtt5.max-reconnect-delay Get the maximum time (in millis) to wait between reconnects. 128000 Integer camel.component.paho-mqtt5.password Password to be used for authentication against the MQTT broker. String camel.component.paho-mqtt5.persistence Client persistence to be used - memory or file. PahoMqtt5Persistence camel.component.paho-mqtt5.qos Client quality of service level (0-2). 2 Integer camel.component.paho-mqtt5.receive-maximum Sets the Receive Maximum. This value represents the limit of QoS 1 and QoS 2 publications that the client is willing to process concurrently. There is no mechanism to limit the number of QoS 0 publications that the Server might try to send. The default value is 65535. 65535 Integer camel.component.paho-mqtt5.retained Retain option. false Boolean camel.component.paho-mqtt5.server-u-r-is Set a list of one or more serverURIs the client may connect to. Multiple servers can be separated by comma. Each serverURI specifies the address of a server that the client may connect to. Two types of connection are supported tcp:// for a TCP connection and ssl:// for a TCP connection secured by SSL/TLS. For example: tcp://localhost:1883 ssl://localhost:8883 If the port is not specified, it will default to 1883 for tcp:// URIs, and 8883 for ssl:// URIs. If serverURIs is set then it overrides the serverURI parameter passed in on the constructor of the MQTT client. When an attempt to connect is initiated the client will start with the first serverURI in the list and work through the list until a connection is established with a server. If a connection cannot be made to any of the servers then the connect attempt fails. Specifying a list of servers that a client may connect to has several uses: High Availability and reliable message delivery Some MQTT servers support a high availability feature where two or more equal MQTT servers share state. An MQTT client can connect to any of the equal servers and be assured that messages are reliably delivered and durable subscriptions are maintained no matter which server the client connects to. The cleansession flag must be set to false if durable subscriptions and/or reliable message delivery is required. Hunt List A set of servers may be specified that are not equal (as in the high availability option). As no state is shared across the servers reliable message delivery and durable subscriptions are not valid. The cleansession flag must be set to true if the hunt list mode is used. String camel.component.paho-mqtt5.session-expiry-interval Sets the Session Expiry Interval. This value, measured in seconds, defines the maximum time that the broker will maintain the session for once the client disconnects. Clients should only connect with a long Session Expiry interval if they intend to connect to the server at some later point in time. By default this value is -1 and so will not be sent, in this case, the session will not expire. If a 0 is sent, the session will end immediately once the Network Connection is closed. When the client has determined that it has no longer any use for the session, it should disconnect with a Session Expiry Interval set to 0. -1 Long camel.component.paho-mqtt5.socket-factory Sets the SocketFactory to use. This allows an application to apply its own policies around the creation of network sockets. If using an SSL connection, an SSLSocketFactory can be used to supply application-specific security settings. The option is a javax.net.SocketFactory type. 
SocketFactory camel.component.paho-mqtt5.ssl-client-props Sets the SSL properties for the connection. Note that these properties are only valid if an implementation of the Java Secure Socket Extensions (JSSE) is available. These properties are not used if a custom SocketFactory has been set. The following properties can be used: com.ibm.ssl.protocol One of: SSL, SSLv3, TLS, TLSv1, SSL_TLS. com.ibm.ssl.contextProvider Underlying JSSE provider. For example IBMJSSE2 or SunJSSE com.ibm.ssl.keyStore The name of the file that contains the KeyStore object that you want the KeyManager to use. For example /mydir/etc/key.p12 com.ibm.ssl.keyStorePassword The password for the KeyStore object that you want the KeyManager to use. The password can either be in plain-text, or may be obfuscated using the static method: com.ibm.micro.security.Password.obfuscate(char password). This obfuscates the password using a simple and insecure XOR and Base64 encoding mechanism. Note that this is only a simple scrambler to obfuscate clear-text passwords. com.ibm.ssl.keyStoreType Type of key store, for example PKCS12, JKS, or JCEKS. com.ibm.ssl.keyStoreProvider Key store provider, for example IBMJCE or IBMJCEFIPS. com.ibm.ssl.trustStore The name of the file that contains the KeyStore object that you want the TrustManager to use. com.ibm.ssl.trustStorePassword The password for the TrustStore object that you want the TrustManager to use. The password can either be in plain-text, or may be obfuscated using the static method: com.ibm.micro.security.Password.obfuscate(char password). This obfuscates the password using a simple and insecure XOR and Base64 encoding mechanism. Note that this is only a simple scrambler to obfuscate clear-text passwords. com.ibm.ssl.trustStoreType The type of KeyStore object that you want the default TrustManager to use. Same possible values as keyStoreType. com.ibm.ssl.trustStoreProvider Trust store provider, for example IBMJCE or IBMJCEFIPS. com.ibm.ssl.enabledCipherSuites A list of which ciphers are enabled. Values are dependent on the provider, for example: SSL_RSA_WITH_AES_128_CBC_SHA;SSL_RSA_WITH_3DES_EDE_CBC_SHA. com.ibm.ssl.keyManager Sets the algorithm that will be used to instantiate a KeyManagerFactory object instead of using the default algorithm available in the platform. Example values: IbmX509 or IBMJ9X509. com.ibm.ssl.trustManager Sets the algorithm that will be used to instantiate a TrustManagerFactory object instead of using the default algorithm available in the platform. Example values: PKIX or IBMJ9X509. The option is a java.util.Properties type. Properties camel.component.paho-mqtt5.ssl-hostname-verifier Sets the HostnameVerifier for the SSL connection. Note that it will be used after handshake on a connection and you should do actions by yourself when hostname is verified error. There is no default HostnameVerifier. The option is a javax.net.ssl.HostnameVerifier type. HostnameVerifier camel.component.paho-mqtt5.user-name Username to be used for authentication against the MQTT broker. String camel.component.paho-mqtt5.will-mqtt-properties Sets the Last Will and Testament (LWT) for the connection. In the event that this client unexpectedly loses its connection to the server, the server will publish a message to itself using the supplied details. The MQTT properties set for the message. The option is a org.eclipse.paho.mqttv5.common.packet.MqttProperties type. MqttProperties camel.component.paho-mqtt5.will-payload Sets the Last Will and Testament (LWT) for the connection. 
In the event that this client unexpectedly loses its connection to the server, the server will publish a message to itself using the supplied details. The byte payload for the message. String camel.component.paho-mqtt5.will-qos Sets the Last Will and Testament (LWT) for the connection. In the event that this client unexpectedly loses its connection to the server, the server will publish a message to itself using the supplied details. The quality of service to publish the message at (0, 1 or 2). 1 Integer camel.component.paho-mqtt5.will-retained Sets the Last Will and Testament (LWT) for the connection. In the event that this client unexpectedly loses its connection to the server, the server will publish a message to itself using the supplied details. Whether or not the message should be retained. false Boolean camel.component.paho-mqtt5.will-topic Sets the Last Will and Testament (LWT) for the connection. In the event that this client unexpectedly loses its connection to the server, the server will publish a message to itself using the supplied details. The topic to publish to. String
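For orientation, the following is a minimal application.properties sketch that sets a few of the options listed above; the broker address, client identifier, and credentials are placeholder values for illustration, not defaults taken from this guide:
# Hypothetical broker and credentials; adjust for your environment
camel.component.paho-mqtt5.broker-url = tcp://broker.example.com:1883
camel.component.paho-mqtt5.client-id = camel-client-1
camel.component.paho-mqtt5.clean-start = true
camel.component.paho-mqtt5.qos = 1
camel.component.paho-mqtt5.automatic-reconnect = true
camel.component.paho-mqtt5.user-name = camel
camel.component.paho-mqtt5.password = changeit
Any option from the table can be set this way; options left unset keep the defaults shown in the Default column.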
[ "<dependency> <groupId>org.apache.camel</groupId> <artifactId>camel-paho-mqtt5</artifactId> <version>{CamelSBVersion}</version> <!-- use the same version as your Camel core version --> </dependency>", "paho-mqtt5:topic[?options]", "paho-mqtt5:topic", "// Receive payload byte[] payload = (byte[]) consumerTemplate.receiveBody(\"paho:topic\"); // Send payload byte[] payload = \"message\".getBytes(); producerTemplate.sendBody(\"paho:topic\", payload);", "// Receive payload String payload = consumerTemplate.receiveBody(\"paho:topic\", String.class); // Send payload String payload = \"message\"; producerTemplate.sendBody(\"paho:topic\", payload);", "from(\"paho:some/queue\") .to(\"mock:test\");", "from(\"direct:test\") .to(\"paho:some/target/queue\");", "from(\"paho:some/queue?brokerUrl=tcp://iot.eclipse.org:1883\") .to(\"mock:test\");", "from(\"direct:test\") .setHeader(PahoConstants.CAMEL_PAHO_OVERRIDE_TOPIC, simple(\"USD{header.customerId}\")) .to(\"paho:some/target/queue\");", "<dependency> <groupId>org.apache.camel.springboot</groupId> <artifactId>camel-paho-mqtt5-starter</artifactId> </dependency>" ]
https://docs.redhat.com/en/documentation/red_hat_build_of_apache_camel_for_spring_boot/3.20/html/camel_spring_boot_reference/csb-camel-paho-mqtt5-component-starter
Chapter 15. Deploying and using Red Hat build of OptaPlanner in Red Hat OpenShift Container Platform: an employee rostering starter example
Chapter 15. Deploying and using Red Hat build of OptaPlanner in Red Hat OpenShift Container Platform: an employee rostering starter example As a business rules developer, you can test and interact with the Red Hat build of OptaPlanner functionality by quickly deploying the optaweb-employee-rostering starter project included in the Red Hat Process Automation Manager distribution to OpenShift. Prerequisites You have access to a deployed OpenShift environment. For details, see the documentation for the OpenShift product that you use. 15.1. Overview of the employee rostering starter application The employee rostering starter application assigns employees to shifts on various positions in an organization. For example, you can use the application to distribute shifts in a hospital between nurses, guard duty shifts across a number of locations, or shifts on an assembly line between workers. Optimal employee rostering must take a number of variables into account. For example, different skills can be required for shifts in different positions. Also, some employees might be unavailable for some time slots or might prefer a particular time slot. Moreover, an employee can have a contract that limits the number of hours that the employee can work in a single time period. The Red Hat build of OptaPlanner rules for this starter application use both hard and soft constraints. During an optimization, the planning engine may not violate hard constraints, for example, if an employee is unavailable (out sick), or that an employee cannot work two spots in a single shift. The planning engine tries to adhere to soft constraints, such as an employee's preference to not work a specific shift, but can violate them if the optimal solution requires it. 15.2. Installing and starting the employee rostering starter application on OpenShift Use the runOnOpenShift.sh script to deploy the Employee Rostering starter application to Red Hat OpenShift Container Platform. The runOnOpenShift.sh shell script is available in the Red Hat Process Automation Manager 7.13.5 Kogito and OptaPlanner 8 Decision Services Quickstarts distribution. The runOnOpenShift.sh script builds and packages the application source code locally and uploads it to the OpenShift environment for deployment. This method requires Java Development Kit, Apache Maven, and a bash shell command line. 15.2.1. Deploying the application using the provided script You can deploy the Employee Rostering starter application to Red Hat OpenShift Container Platform using the provided script. The script builds and packages the application source code locally and uploads it to the OpenShift environment for deployment. Prerequisites You are logged in to the target OpenShift environment using the oc command line tool. For more information about this tool, see the OpenShift Container Platform CLI Reference . OpenJDK 11 or later is installed. Red Hat build of Open JDK is available from the Software Downloads page in the Red Hat Customer Portal (login required). Apache Maven 3.6 or higher is installed. Maven is available from the Apache Maven Project website. A bash shell environment is available on your local system. 
Procedure Navigate to the Software Downloads page in the Red Hat Customer Portal (login required), and select the product and version from the drop-down options: Product: Process Automation Manager Version: 7.13.5 Download the Red Hat Process Automation Manager 7.13 Maven Repository Kogito and OptaPlanner 8 Maven Repository ( rhpam-7.13.5-kogito-maven-repository.zip ) file. Extract the rhpam-7.13.5-kogito-maven-repository.zip file. Copy the contents of the rhpam-7.13.5-kogito-maven-repository/maven-repository subdirectory into the ~/.m2/repository directory. Download the rhpam-7.13.5-kogito-and-optaplanner-quickstarts.zip file from the Software Downloads page of the Red Hat Customer Portal. Extract the downloaded archive. Navigate to the optaweb-employee-rostering folder. To build the Employee Rostering application, run the following command: Log in to an OpenShift account or a Red Hat Code Ready Container instance. In the following example, <account-url> is the URL for an OpenShift account or Red Hat Code Ready Container instance and <login-token> is the login token for that account: Create a new project to host Employee Rostering: Run the provision script to build and deploy the application: Compilation and packaging might take up to 10 minutes to complete. These processes continually show progress on the command line output. When the operation completes, the following message is displayed, where <URL> is the URL for the deployment: Enter the URL that you used earlier in the procedure, for either an OpenShift account or Red Hat Code Ready Container instance, to access the deployed application. The first startup can take up to a minute because additional building is completed on the OpenShift platform. Note If the application does not open a minute after clicking the link, perform a hard refresh of your browser page. 15.3. Using the employee rostering starter application You can use the web interface to use the Employee Rostering starter application. The interface is developed in ReactJS. You can also access the REST API to create a custom user interface as necessary. 15.3.1. The draft and published periods At any particular moment, you can use the application to create the roster for a time period, called a draft period. By default, the length of a draft period is three weeks. When the roster is final for the first week of the draft period, you can publish the roster. At this time, the roster for the first week of the current draft period becomes a published period. In a published period, the roster is fixed and you can no longer change it automatically (however, emergency manual changes are still possible). This roster can then be distributed to employees so they can plan their time around it. The draft period is shifted a week later. For example, assume that a draft period of September 1 to September 21 is set. You can automatically create the employee roster for this period. Then, when you publish the roster, the period up to September 7 becomes published. The new draft period is September 8 to September 28. For instructions about publishing the roster, see Section 15.3.12, "Publishing the shift roster" . 15.3.2. The rotation pattern The employee rostering application supports a rotation pattern for shifts and employees. The rotation pattern is a "model" period of any time starting from two days. The pattern is not tied to a particular date. You can create time buckets for every day of the rotation. Every time bucket sets the time of a shift. 
Optionally, the template can include the name of the default employee for the shift. When you publish the roster, the application adds a new week to the draft period. At this time, the shifts and, if applicable, default employee names are copied from the rotation pattern to the new part of the draft period. When the end of the rotation pattern is reached, it is automatically restarted from the beginning. If weekend shift patterns in your organization are different from weekday shift patterns, use a rotation pattern of one week or a whole number of weeks, for example, 14, 21, or 28 days. The default length is 28 days. Then the pattern is always repeated on the same weekdays and you can set the shifts for different weekdays. For instructions about editing the rotation pattern, see Section 15.3.13, "Viewing and editing the rotation pattern" . 15.3.3. Employee Rostering tenants The Employee Rostering application supports multiple tenants . Each tenant is an independent set of data, including inputs and roster outputs. Changing data for one tenant does not affect other tenants. You can switch between tenants to use several independent data sets, for example, to prepare employee rosters for different locations. Several sample tenants are present after installation, representing several typical enterprise types such as a factory or hospital. You can select any of these tenants and modify them to suit your needs. You can also create a new tenant to enter data from a blank slate. 15.3.3.1. Changing an Employee Rostering tenant You can change the current tenant. After you select a different tenant, all of the displayed information refers to this tenant and any changes you make affect only this tenant. Procedure In the Employee Rostering application web interface, in the top right part of the browser window, click the Tenant list. Select a tenant from the list. 15.3.3.2. Creating a tenant You can create a new tenant to enter data from a blank slate. When creating a tenant, you can set several parameters that determine how the application prepares the output for this tenant. Important You cannot change tenant parameters after you create the tenant. Procedure To create a new tenant in the Employee Rostering application web interface, in the top right corner of the browser window click the settings (gear) icon then click Add . Set the following values: Name : The name of the new tenant. This name is displayed in the list of tenants. Schedule Start Date : The start date of the initial draft period. After you publish the roster, this date becomes the start date of the published period. The weekday of this date always remains the weekday that starts the draft period, any particular published period, and the first use of the rotation pattern. So it is usually most convenient to set the start date to the start of a week (Sunday or Monday). Draft Length (days) : The length of the draft period. The draft period stays the same length for the lifetime of the tenant. Publish Notice (days) : The length of the publish notice period. Aspire to publish the final roster for any day at least this time in advance, so employees have enough notice to plan their personal life around their shift times. In the current version, this setting is not enforced in any way. Publish Length (days) : The length of the period that becomes published (fixed) every time you publish the roster. In the current version, this setting is fixed at 7 days. Rotation Length (days) : The length of the rotation pattern. 
Timezone : The timezone of the environment to which the roster applies. This timezone is used to determine the "current" date for user interface display. Click Save . The tenant is created with blank data. 15.3.4. Entering skills You can set all skills that are required in any position within the roster. For example, a 24-hour diner can require cooking, serving, bussing, and hosting skills, in addition to skills such as general human resources and restaurant operations. Procedure In the Employee Rostering application web interface, click the Skills tab. You can see the numbers of currently visible skills in the top right part of the browser window, for example, 1-15 of 34 . You can use the < and > buttons to display other skills in the list. You can enter any part of a skill name in the Search box to search for skills. Complete the following steps to add a new skill: Click Add . Enter the name of the new skill in the text field under Name . Click the Save icon. To edit the name of a skill, click the Edit Skill icon (pencil shape) to the skill. To delete a skill, click the Delete Skill icon (trashcan shape) to the skill. Note Within each tenant, skill names must be unique. You cannot delete a skill if the skill is associated with an employee or spot. 15.3.5. Entering spots You must enter the list of spots , which represent various positions at the business. For a diner, spots include the bar, the bussing stations, the front counter, the various kitchen stations, the serving areas, and the office. For each spot, you can select one or more required skills from the list that you entered in the Skills tab. The application rosters only employees that have all of the required skills for a spot into that spot. If the spot has no required skill, the application can roster any employee into the spot. Procedure To enter or change spots in the Employee Rostering application web interface, click the Spots tab. You can enter any part of a spot name in the Search box to search for spots. Complete the following steps to add a new spot: Click Add Spot . Enter the name of the new spot in the text field under Name . Optional: Select one or more skills from the drop-down list under Required skill set . Click the Save icon. To edit the name and required skills for a spot, click the Edit Spot icon (pencil shape) to the spot. To delete a spot, click the Delete Spot icon (trashcan shape) to the spot. Note Within each tenant, spot names must be unique. You cannot delete a spot when any shifts are created for it. 15.3.6. Entering the list of contracts You must enter the list of all of the types of contracts that the business uses for employees. A contract determines the maximum time that the employee can work in a day, calendar week, calendar month, or calendar year. When creating a contract, you can set any of the limitations or none at all. For example, a part-time employee might not be allowed to work more than 20 hours in a week, while a full-time employee might be limited to 10 hours in a day and 1800 hours in a year. Another contract might include no limitations on worked hours. You must enter all work time limits for contracts in minutes. Procedure To enter or change the list of contracts in the Employee Rostering application web interface, click the Contracts tab. You can see the numbers of currently visible contracts in the top right part of the browser window, for example, 1-15 of 34 . You can use the < and > buttons to display other contracts in the list. 
You can enter any part of a contract name in the Search box to search for contracts. Complete the following steps to add a new contract: Click Add . Enter the name of the contract in the text field under Name . Enter the required time limits under Maximum minutes : If the employee must not work more than a set time per day, enable the check box at Per Day and enter the amount of minutes in the field to this check box. If the employee must not work more than a set time per calendar week, enable the check box at Per Week and enter the amount of minutes in the field to this check box. If the employee must not work more than a set time per calendar month, enable the check box at Per Month and enter the amount of minutes in the field to this check box. If the employee must not work more than a set time per calendar year, enable the check box at Per Year and enter the amount of minutes in the field to this check box. Click the Save icon. To edit the name and time limits for a contract, click the Edit Contract icon (pencil shape) to the name of the contract. To delete a contract, click the Delete Contract icon (trashcan shape) to the name of the contract. Note Within each tenant, contract names must be unique. You cannot delete a contract if it is assigned to any employee. 15.3.7. Entering the list of employees You must enter the list of all employees of the business, the skills they possess, and the contracts that apply to them. The application rosters these employees to spots according to their skills and according to the work time limits in the contracts. Procedure To enter or change the list of employees in the Employee Rostering application web interface, click the Employees tab. You can see the numbers of currently visible employees in the top right part of the browser window, for example, 1-15 of 34 . You can use the < and > buttons to display other employees in the list. You can enter any part of an employee name in the Search box to search for employees. Complete the following steps to add a new employee: Click Add . Enter the name of the employee in the text field under Name . Optional: Select one or more skills from the drop-down list under Skill set . Select a contract from the drop-down list under Contract . Click the Save icon. To edit the name and skills for an employee, click the Edit Employee icon (pencil shape) to the name of the employee. To delete an employee, click the Delete Employee icon (trashcan shape) to the name of the employee. Note Within each tenant, employee names must be unique. You cannot delete employees if they are rostered to any shifts. 15.3.8. Setting employee availability You can set the availability of employees for particular time spans. If an employee is unavailable for a particular time span, the employee can never be assigned to any shift during this time span (for example, if the employee has called in sick or is on vacation). Undesired and desired are employee preferences for particular time spans; the application accommodates them when possible. Procedure To view and edit employee availability in the Employee Rostering application web interface, click the Availability Roster tab. In the top left part of the window, you can see the dates for which the roster is displayed. To view other weeks, you can use the < and > buttons to the Week of field. Alternatively, you can click the date field and change the date to view the week that includes this date. 
To create an availability entry for an employee, click empty space on the schedule and then select an employee. Initially, an Unavailable entry for the entire day is created. To change an availability entry, click the entry. You can change the following settings: From and To date and time: The time span to which the availability entry applies. Status: you can select Unavailable , Desired , or Undesired status from a drop-down list. To save the entry, click Apply . To delete an availability entry, click the entry, then click Delete availability . You can also change or delete an availability entry by moving the mouse pointer over the entry and then clicking one of the icons displayed over the entry: Click the icon to set the status of the entry to Unavailable . Click the icon to set the status of the entry to Undesired . Click the icon to set the status of the entry to Desired . Click the icon to delete the entry. Important If an employee is already assigned to a shift and then you create or change an availability entry during this shift, the assignment is not changed automatically. You must create the employee shift roster again to apply new or changed availability entries. 15.3.9. Viewing and editing shifts in the shift roster The Shift Roster is a table of all spots and all possible time spans. If an employee must be present in a spot during a time span, a shift must exist for this spot and this time span. If a spot requires several employees at the same time, you can create several shifts for the same spot and time span. Each shift is represented by a rectangle at the intersection of a spot (row) and time span (column). When new time is added to the draft period, the application copies the shifts (and default employees, if present) from the rotation pattern into this new part of the draft period. You can also manually add and edit shifts in the draft period. Procedure To view and edit the shift roster in the Employee Rostering application web interface, click the Shift tab. In the top left part of the window, you can see the dates for which the roster is displayed. To view other weeks, you can use the < and > buttons to the Week of field. Alternatively, you can click the date field and change the date to view the week that includes this date. To add a shift, click an open area of the schedule. The application adds a shift, determining the slot and time span automatically from the location of the click. To edit a shift, click the shift. You can set the following values for a shift: From and To date and time: The exact time and duration of the shift. Employee : The employee assigned to the shift. Pinned : Whether the employee is pinned to the shift. If an employee is pinned, automatic employee rostering cannot change the assignment of the employee to the shift. A pinned employee is not automatically replicated to any other shift. To save the changes, click Apply . To delete a shift, click the shift, and then click Delete shift . 15.3.10. Creating and viewing the employee shift roster You can use the application to create and view the optimal shift roster for all employees. Procedure To view and edit the shift roster in the Employee Rostering application web interface, click the Shift tab. To create the optimal shift roster, click Schedule . The application takes 30 seconds to find the optimal solution. Result When the operation is finished, the Shift Roster view contains the optimal shift roster. The new roster is created for the draft period. 
The operation does not modify the published periods. In the top left part of the window, you can see the dates for which the roster is displayed. To view other weeks, you can use the < and > buttons to the Week of field. Alternatively, you can click the date field and change the date to view the week that includes this date. In the draft period, the borders of boxes that represent shifts are dotted lines. In the published periods, the borders are unbroken lines. The color of the boxes that represent shifts shows the constraint status of every shift: Strong green: Soft constraint matched; for example, the shift is in a "desired" timeslot for the employee. Pale green: No constraint broken. Grey: Soft constraint broken; for example, the shift is in an "undesired" timeslot for the employee. Yellow: Medium constraint broken; for example, no employee is assigned to the shift. Red: Hard constraint broken; for example, an employee has two shifts at the same time. 15.3.11. Viewing employee shifts You can view the assigned shifts for particular employees in an employee-centric table. The information is the same as the Shift Roster, but the viewing format might be more convenient for informing employees about their assigned shifts. Procedure To view a table of employees and shifts in the Employee Rostering application web interface, click the Availability Roster tab. In the top left part of the window, you can see the dates for which the roster is displayed. To view other weeks, you can use the < and > buttons to the Week of field. Alternatively, you can click the date field and change the date to view the week that includes this date. You can see the numbers of currently visible employees in the top right part of the browser window, for example, 1-10 of 34 . You can use the < and > buttons to the numbers to display other employees in the list. In the draft period, the borders of boxes representing shifts are dotted lines. In the published periods, the borders are unbroken lines. 15.3.12. Publishing the shift roster When you publish the shift roster, the first week of the draft period becomes published. Automatic employee rostering no longer changes any shift assignments in the published period, though emergency manual changing is still possible. The draft period is shifted one week later. For details about draft and published periods, see Section 15.3.1, "The draft and published periods" . Procedure To view and edit the shift roster in the Employee Rostering application web interface, click the Shift tab. Review the shift roster for the first week of the draft period to ensure that it is acceptable. Click Publish . 15.3.13. Viewing and editing the rotation pattern The rotation pattern enables you to add, move, and delete shifts so you can manage your employee resources efficiently. It is defined by time buckets and seats. A time bucket describes a time slot (for example, 9:00 a.m. to 5:00 p.m.) for a particular spot or location (A) (for example, Anaesthetics), over two or more days, and any skills that are required (for example, firearm training). A seat (B) is an employee assignment for a particular day in a specific time bucket. An employee stub is an icon that represents an employee that is available to be assigned to a time bucket. Employee stubs are listed in the Employee Stub List . For more information about the rotation pattern, see Section 15.3.2, "The rotation pattern" . Procedure Click the Rotation tab to view and edit the rotation pattern. Select a spot from the Rotation menu. 
Click Add New Time Bucket . The Creating Working Time Bucket dialog is displayed. Specify a start and end time, select any additional required skills, select the days for this time bucket, and click Save . The unassigned seats for that time bucket appears on the Rotation page organized by time ranges. To create an employee stub list so that you can add employees to the rotation, click Edit Employee Stub List . In the Edit Employee Stub List dialog, click Add Employee and select an employee from the list. Add all of the employees required for this stub list and click Save . The employees appear above the time buckets on the Rotation page. Click an employee icon to select an employee from the employee stub list. Click and drag the mouse over the seats of a time bucket to assign the selected employee to those seats. The seat is populated with the employee icon. Note A time bucket can only have one employee assigned to it for each day. To add multiple employees to the same time bucket, copy the time bucket and change the employee name as required. To provision the schedule, click Scheduling and select the spot that you created the rotation for. Click Provision and specify the date range. Deselect the spots that you do not want to include in this schedule. Click the arrow to the selected spot and deselect any time buckets that you do not want to use in your schedule. Click Provision Shifts . The calendar is populated with shifts generated from the time buckets. To modify a shift, click a generated shift on the calendar.
[ "mvn clean install -DskipTests -DskipITs", "login <account-url> --token <login-token>", "new-project optaweb-employee-rostering", "./runOnOpenShift.sh", "You can access the application at <URL> once the deployment is done." ]
https://docs.redhat.com/en/documentation/red_hat_process_automation_manager/7.13/html/developing_solvers_with_red_hat_build_of_optaplanner_in_red_hat_process_automation_manager/assembly-optimizer-running-employee-rostering-openshift
Chapter 1. Red Hat Insights policies service overview
Chapter 1. Red Hat Insights policies service overview Policies evaluate system configurations in your environment, and can send notifications when changes occur. Policies you create are applicable to all systems in your Insights inventory. You can create and manage policies using the Red Hat Insights for Red Hat Enterprise Linux user interface in the Red Hat Hybrid Cloud Console, or using the Insights API. Policies can assist you by managing tasks such as: Raising an alert when particular conditions occur in your system configuration. Emailing a team when security packages are out of date on a system. Using policies to monitor configuration changes in your inventory and notifying by email requires: Setting user email preferences (if not already set). Creating a policy to detect configuration changes as a trigger and selecting email as the trigger action. Note Configure User Access in Red Hat Hybrid Cloud Console > the Settings icon (⚙) > Identity & Access Management > User Access > Users . See User Access Configuration Guide for Role-based Access Control (RBAC) with FedRAMP for more information about this feature and example use cases. 1.1. User Access settings in the Red Hat Hybrid Cloud Console All users on your account have access to most of the data in Insights for Red Hat Enterprise Linux. 1.1.1. Predefined User Access groups and roles To make groups and roles easier to manage, Red Hat provides two predefined groups and a set of predefined roles. 1.1.1.1. Predefined groups The Default access group contains all users in your organization. Many predefined roles are assigned to this group. It is automatically updated by Red Hat. Note If the Organization Administrator makes changes to the Default access group its name changes to Custom default access group and it is no longer updated by Red Hat. The Default admin access group contains only users who have Organization Administrator permissions. This group is automatically maintained and users and roles in this group cannot be changed. 1.1.2. User Access roles for the Policies service The following predefined roles on the Red Hat Hybrid Cloud Console enable access to policies features in Insights for Red Hat Enterprise Linux: Policies administrator role. The Policies administrator role provides read and write access allowing these users to perform any available operation on policies resources. This predefined role is in the Default admin access group . Policies viewer role. The Policies viewer role provides read-only access. (If your organization determines that the default configuration of the Policies viewer role is inadequate, a User Access administrator can create a custom role with the specific permissions that you need.) This predefined role is in the Default access group . Note If you configured groups before April 2023, any user who was not an Organization Administrator will have the Policies administrator role replaced with the Policies viewer role. Modifications made to the Default access group before April are not changed. Additional Resources How to use User Access in the User Access Configuration Guide for Role-based Access Control (RBAC). Predefined User Access roles
null
https://docs.redhat.com/en/documentation/red_hat_insights/1-latest/html/monitoring_and_reacting_to_configuration_changes_using_policies_with_fedramp/intro-policies
9.4. Configuration Examples
9.4. Configuration Examples 9.4.1. MySQL Changing Database Location When using Red Hat Enterprise Linux 6, the default location for MySQL to store its database is /var/lib/mysql/ . This is where SELinux expects it to be by default, and hence this area is already labeled appropriately for you, using the mysqld_db_t type. The location where the database is stored can be changed depending on individual environment requirements or preferences, however it is important that SELinux is aware of this new location; that it is labeled accordingly. This example explains how to change the location of a MySQL database and then how to label the new location so that SELinux can still provide its protection mechanisms to the new area based on its contents. Note that this is an example only and demonstrates how SELinux can affect MySQL. Comprehensive documentation of MySQL is beyond the scope of this document. Refer to the official MySQL documentation for further details. This example assumes that the mysql-server and setroubleshoot-server packages are installed, that the auditd service is running, and that there is a valid database in the default location of /var/lib/mysql/ . Run the ls -lZ /var/lib/mysql command to view the SELinux context of the default database location for mysql : This shows mysqld_db_t which is the default context element for the location of database files. This context will have to be manually applied to the new database location that will be used in this example in order for it to function properly. Enter mysqlshow -u root -p and enter the mysqld root password to show the available databases: Shut down the mysqld daemon with the service mysqld stop command as the root user: Create a new directory for the new location of the database(s). In this example, /mysql/ is used: Copy the database files from the old location to the new location: Change the ownership of this location to allow access by the mysql user and group. This sets the traditional Unix permissions which SELinux will still observe. Run the ls -lZ /opt command to see the initial context of the new directory: The context usr_t of this newly created directory is not currently suitable to SELinux as a location for MySQL database files. Once the context has been changed, MySQL will be able to function properly in this area. Open the main MySQL configuration file /etc/my.cnf with a text editor and modify the datadir option so that it refers to the new location. In this example the value that should be entered is /mysql . Save this file and exit. Run the service mysqld start command as the root user to start mysqld . The service should fail to start, and a denial will be logged to the /var/log/messages file. However, if the audit daemon is running alongside the setroubleshoot service, the denial will be logged to the /var/log/audit/audit.log file instead: The reason for this denial is that /mysql/ is not labeled correctly for MySQL data files. SELinux is stopping MySQL from having access to the content labeled as usr_t . Perform the following steps to resolve this problem: Run the following semanage command to add a context mapping for /mysql . Note that semanage is not installed by default. If it is missing on your system, install the policycoreutils-python package. 
This mapping is written to the /etc/selinux/targeted/contexts/files/file_contexts.local file: Now use the restorecon command to apply this context mapping to the running system: Now that the /mysql/ location has been labeled with the correct context for MySQL, the mysqld daemon starts: Confirm the context has changed for /mysql/ : The location has been changed and labeled, and the mysqld daemon has started successfully. At this point all running services should be tested to confirm normal operation.
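As an optional verification step before restarting the service, you can confirm that the new file context mapping resolves to the expected type. This is a minimal sketch using the matchpathcon utility (provided by the libselinux-utils package), assuming the /mysql/ location used in this example; the output shown is what you would expect once the mapping is in place:
# Query the context that SELinux expects for the new location
~]# matchpathcon /mysql
/mysql    system_u:object_r:mysqld_db_t:s0
If the command reports a different type, re-check the semanage fcontext mapping before running restorecon.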
[ "~]# ls -lZ /var/lib/mysql drwx------. mysql mysql unconfined_u:object_r: mysqld_db_t :s0 mysql", "~]# mysqlshow -u root -p Enter password: ******* +--------------------+ | Databases | +--------------------+ | information_schema | | mysql | | test | | wikidb | +--------------------+", "~]# service mysqld stop Stopping MySQL: [ OK ]", "~]# mkdir -p /mysql", "~]# cp -R /var/lib/mysql/* /mysql/", "~]# chown -R mysql:mysql /mysql", "~]# ls -lZ /opt drwxr-xr-x. mysql mysql unconfined_u:object_r: usr_t :s0 mysql", "[mysqld] datadir=/mysql", "SELinux is preventing /usr/libexec/mysqld \"write\" access on /mysql. For complete SELinux messages. run sealert -l b3f01aff-7fa6-4ebe-ad46-abaef6f8ad71", "~]# semanage fcontext -a -t mysqld_db_t \"/mysql(/.*)?\"", "~]# grep -i mysql /etc/selinux/targeted/contexts/files/file_contexts.local /mysql(/.*)? system_u:object_r:mysqld_db_t:s0", "~]# restorecon -R -v /mysql", "~]# service mysqld start Starting MySQL: [ OK ]", "~]USD ls -lZ /opt drwxr-xr-x. mysql mysql system_u:object_r: mysqld_db_t :s0 mysql" ]
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/managing_confined_services/sect-managing_confined_services-mysql-configuration_examples
26.8. Installing a CA Into an Existing IdM Domain
26.8. Installing a CA Into an Existing IdM Domain If an IdM domain was installed without a Certificate Authority (CA), you can install the CA services subsequently. Depending on your environment, you can install the IdM Certificate Server CA or use an external CA. Note For details on the supported CA configurations, see Section 2.3.2, "Determining What CA Configuration to Use" . Installing an IdM Certificate Server Use the following command to install the IdM Certificate Server CA: Run the ipa-certupdate utility on all servers and clients to update them with the information about the new certificate from LDAP. You must run ipa-certupdate on every server and client separately. Important Always run ipa-certupdate after manually installing a certificate. If you do not, the certificate will not be distributed to the other machines. Installing an External CA The subsequent installation of an external CA consists of multiple steps: Start the installation: After this step, a message is displayed stating that a certificate signing request (CSR) was saved. Submit the CSR to the external CA and copy the issued certificate to the IdM server. Continue the installation by passing the certificates and the full path to the external CA files to ipa-ca-install : Run the ipa-certupdate utility on all servers and clients to update them with the information about the new certificate from LDAP. You must run ipa-certupdate on every server and client separately. Important Always run ipa-certupdate after manually installing a certificate. If you do not, the certificate will not be distributed to the other machines. The CA installation does not replace the existing service certificates for the LDAP and web server with ones issued by the newly installed CA. For details on how to replace the certificates, see Section 26.9, "Replacing the Web Server's and LDAP Server's Certificate" .
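Because ipa-certupdate must be run on every server and client separately, you may find it convenient to run it over SSH from one administrative host. The following is only a sketch, and the replica host name is a placeholder:
# On the server where the CA was installed
[root@ipa-server ~]# ipa-certupdate

# Repeat on every other IdM server and client, for example:
[root@ipa-server ~]# ssh root@replica1.example.com ipa-certupdate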
[ "[root@ipa-server ~] ipa-ca-install", "[root@ipa-server ~] ipa-ca-install --external-ca", "ipa-ca-install --external-cert-file=/root/ master .crt --external-cert-file=/root/ca.crt" ]
https://docs.redhat.com/en/documentation/Red_Hat_Enterprise_Linux/7/html/linux_domain_identity_authentication_and_policy_guide/CA-less-to-CA
11.2. Disabling vhost-net
11.2. Disabling vhost-net The vhost-net module is a kernel-level back end for virtio networking that reduces virtualization overhead by moving virtio packet processing tasks out of user space (the qemu process) and into the kernel (the vhost-net driver). vhost-net is only available for virtio network interfaces. If the vhost-net kernel module is loaded, it is enabled by default for all virtio interfaces, but can be disabled in the interface configuration in the case that a particular workload experiences a degradation in performance when vhost-net is in use. Specifically, when UDP traffic is sent from a host machine to a guest virtual machine on that host, performance degradation can occur if the guest virtual machine processes incoming data at a rate slower than the host machine sends it. In this situation, enabling vhost-net causes the UDP socket's receive buffer to overflow more quickly, which results in greater packet loss. It is therefore better to disable vhost-net in this situation to slow the traffic, and improve overall performance. To disable vhost-net , edit the <interface> sub-element in the guest virtual machine's XML configuration file and define the network as follows: Setting the driver name to qemu forces packet processing into qemu user space, effectively disabling vhost-net for that interface.
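A minimal sketch of applying this change with the virsh utility follows; the guest name guest1 is a placeholder, and the XML fragment itself is shown in the configuration example below:
# Edit the persistent guest definition and add <driver name="qemu"/> to the virtio <interface> element
~]# virsh edit guest1

# Confirm the setting was saved in the stored configuration
~]# virsh dumpxml guest1 | grep -A 2 "<interface type"

# Restart the guest so the change takes effect
~]# virsh shutdown guest1
~]# virsh start guest1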
[ "<interface type=\"network\"> <model type=\"virtio\"/> <driver name=\"qemu\"/> </interface>" ]
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/virtualization_host_configuration_and_guest_installation_guide/sec-virtualization_host_configuration_and_guest_installation_guide-network_configuration-disabling_vhostnet
Chapter 20. Configuring PTP Using ptp4l
Chapter 20. Configuring PTP Using ptp4l 20.1. Introduction to PTP The Precision Time Protocol ( PTP ) is a protocol used to synchronize clocks in a network. When used in conjunction with hardware support, PTP is capable of sub-microsecond accuracy, which is far better than is normally obtainable with NTP . PTP support is divided between the kernel and user space. The kernel in Red Hat Enterprise Linux includes support for PTP clocks, which are provided by network drivers. The actual implementation of the protocol is known as linuxptp , a PTPv2 implementation according to the IEEE standard 1588 for Linux. The linuxptp package includes the ptp4l and phc2sys programs for clock synchronization. The ptp4l program implements the PTP boundary clock and ordinary clock. With hardware time stamping, it is used to synchronize the PTP hardware clock to the master clock, and with software time stamping it synchronizes the system clock to the master clock. The phc2sys program is needed only with hardware time stamping, for synchronizing the system clock to the PTP hardware clock on the network interface card ( NIC ). 20.1.1. Understanding PTP The clocks synchronized by PTP are organized in a master-slave hierarchy. The slaves are synchronized to their masters which may be slaves to their own masters. The hierarchy is created and updated automatically by the best master clock ( BMC ) algorithm, which runs on every clock. When a clock has only one port, it can be master or slave , such a clock is called an ordinary clock ( OC ). A clock with multiple ports can be master on one port and slave on another, such a clock is called a boundary clock ( BC ). The top-level master is called the grandmaster clock , which can be synchronized by using a Global Positioning System ( GPS ) time source. By using a GPS-based time source, disparate networks can be synchronized with a high-degree of accuracy. Figure 20.1. PTP grandmaster, boundary, and slave Clocks 20.1.2. Advantages of PTP One of the main advantages that PTP has over the Network Time Protocol ( NTP ) is hardware support present in various network interface controllers ( NIC ) and network switches. This specialized hardware allows PTP to account for delays in message transfer, and greatly improves the accuracy of time synchronization. While it is possible to use non-PTP enabled hardware components within the network, this will often cause an increase in jitter or introduce an asymmetry in the delay resulting in synchronization inaccuracies, which add up with multiple non-PTP aware components used in the communication path. To achieve the best possible accuracy, it is recommended that all networking components between PTP clocks are PTP hardware enabled. Time synchronization in larger networks where not all of the networking hardware supports PTP might be better suited for NTP . With hardware PTP support, the NIC has its own on-board clock, which is used to time stamp the received and transmitted PTP messages. It is this on-board clock that is synchronized to the PTP master, and the computer's system clock is synchronized to the PTP hardware clock on the NIC. With software PTP support, the system clock is used to time stamp the PTP messages and it is synchronized to the PTP master directly. Hardware PTP support provides better accuracy since the NIC can time stamp the PTP packets at the exact moment they are sent and received while software PTP support requires additional processing of the PTP packets by the operating system. 20.2. 
Using PTP In order to use PTP , the kernel network driver for the intended interface has to support either software or hardware time stamping capabilities. 20.2.1. Checking for Driver and Hardware Support In addition to hardware time stamping support being present in the driver, the NIC must also be capable of supporting this functionality in the physical hardware. The best way to verify the time stamping capabilities of a particular driver and NIC is to use the ethtool utility to query the interface. In this example, eth3 is the interface you want to check: Note The PTP Hardware Clock value printed by ethtool is the index of the PTP hardware clock. It corresponds to the naming of the /dev/ptp* devices. The first PHC has an index of 0. For software time stamping support, the parameters list should include: SOF_TIMESTAMPING_SOFTWARE SOF_TIMESTAMPING_TX_SOFTWARE SOF_TIMESTAMPING_RX_SOFTWARE For hardware time stamping support, the parameters list should include: SOF_TIMESTAMPING_RAW_HARDWARE SOF_TIMESTAMPING_TX_HARDWARE SOF_TIMESTAMPING_RX_HARDWARE 20.2.2. Installing PTP The kernel in Red Hat Enterprise Linux includes support for PTP . User space support is provided by the tools in the linuxptp package. To install linuxptp , issue the following command as root : This will install ptp4l and phc2sys . Do not run more than one service to set the system clock's time at the same time. If you intend to serve PTP time using NTP , see Section 20.8, "Serving PTP Time with NTP" . 20.2.3. Starting ptp4l The ptp4l program can be started from the command line or it can be started as a service. When running as a service, options are specified in the /etc/sysconfig/ptp4l file. Options required for use both by the service and on the command line should be specified in the /etc/ptp4l.conf file. The /etc/sysconfig/ptp4l file includes the -f /etc/ptp4l.conf command line option, which causes the ptp4l program to read the /etc/ptp4l.conf file and process the options it contains. The use of the /etc/ptp4l.conf is explained in Section 20.4, "Specifying a Configuration File" . More information on the different ptp4l options and the configuration file settings can be found in the ptp4l(8) man page. Starting ptp4l as a Service To start ptp4l as a service, issue the following command as root : For more information on managing system services in Red Hat Enterprise Linux 7, see Chapter 10, Managing Services with systemd . Using ptp4l From The Command Line The ptp4l program tries to use hardware time stamping by default. To use ptp4l with hardware time stamping capable drivers and NICs, you must provide the network interface to use with the -i option. Enter the following command as root : Where eth3 is the interface you want to configure. Below is example output from ptp4l when the PTP clock on the NIC is synchronized to a master: The master offset value is the measured offset from the master in nanoseconds. The s0 , s1 , s2 strings indicate the different clock servo states: s0 is unlocked, s1 is clock step and s2 is locked. Once the servo is in the locked state ( s2 ), the clock will not be stepped (only slowly adjusted) unless the pi_offset_const option is set to a positive value in the configuration file (described in the ptp4l(8) man page). The adj value is the frequency adjustment of the clock in parts per billion (ppb). The path delay value is the estimated delay of the synchronization messages sent from the master in nanoseconds. Port 0 is a Unix domain socket used for local PTP management. 
Port 1 is the eth3 interface (based on the example above.) INITIALIZING, LISTENING, UNCALIBRATED and SLAVE are some of possible port states which change on the INITIALIZE, RS_SLAVE, MASTER_CLOCK_SELECTED events. In the last state change message, the port state changed from UNCALIBRATED to SLAVE indicating successful synchronization with a PTP master clock. Logging Messages From ptp4l By default, messages are sent to /var/log/messages . However, specifying the -m option enables logging to standard output which can be useful for debugging purposes. To enable software time stamping, the -S option needs to be used as follows: 20.2.3.1. Selecting a Delay Measurement Mechanism There are two different delay measurement mechanisms and they can be selected by means of an option added to the ptp4l command as follows: -P The -P selects the peer-to-peer ( P2P ) delay measurement mechanism. The P2P mechanism is preferred as it reacts to changes in the network topology faster, and may be more accurate in measuring the delay, than other mechanisms. The P2P mechanism can only be used in topologies where each port exchanges PTP messages with at most one other P2P port. It must be supported and used by all hardware, including transparent clocks, on the communication path. -E The -E selects the end-to-end ( E2E ) delay measurement mechanism. This is the default. The E2E mechanism is also referred to as the delay "request-response" mechanism. -A The -A enables automatic selection of the delay measurement mechanism. The automatic option starts ptp4l in E2E mode. It will change to P2P mode if a peer delay request is received. Note All clocks on a single PTP communication path must use the same mechanism to measure the delay. Warnings will be printed in the following circumstances: When a peer delay request is received on a port using the E2E mechanism. When a E2E delay request is received on a port using the P2P mechanism. 20.3. Using PTP with Multiple Interfaces When using PTP with multiple interfaces in different networks, it is necessary to change the reverse path forwarding mode to loose mode. Red Hat Enterprise Linux 7 defaults to using Strict Reverse Path Forwarding following the Strict Reverse Path recommendation from RFC 3704, Ingress Filtering for Multihomed Networks . See the Reverse Path Forwarding section in the Red Hat Enterprise Linux 7 Security Guide for more details. The sysctl utility is used to read and write values to tunables in the kernel. Changes to a running system can be made using sysctl commands directly on the command line and permanent changes can be made by adding lines to the /etc/sysctl.conf file. To change to loose mode filtering globally, enter the following commands as root : To change the reverse path filtering mode per network interface, use the net.ipv4. interface .rp_filter command on all PTP interfaces. For example, for an interface with device name em1 : To make these settings persistent across reboots, modify the /etc/sysctl.conf file. You can change the mode for all interfaces, or for a particular interface. To change the mode for all interfaces, open the /etc/sysctl.conf file with an editor running as the root user and add a line as follows: To change only certain interfaces, add multiple lines in the following format: Note When using the settings for all and particular interfaces as well, maximum value from conf/{all,interface}/rp_filter is used when doing source validation on each interface. 
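A minimal sketch of the corresponding /etc/sysctl.conf entries follows; em1 and em2 are placeholder interface names, and the value 2 selects loose mode:
# Loose reverse path filtering for all interfaces
net.ipv4.conf.all.rp_filter = 2

# Or per interface, for specific PTP interfaces only
net.ipv4.conf.em1.rp_filter = 2
net.ipv4.conf.em2.rp_filter = 2
Run sysctl -p as root to apply the file without rebooting.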
You can also change the mode by using the default setting, which means that it applies only to the newly created interfaces. For more information on using the all , default , or a specific device settings in the sysctl parameters, see the Red Hat Knowledgebase article What is the difference between "all", "default" and a specific device in a sysctl parameter? . Note that you might experience issues of two types due to the timing of the sysctl service run during the boot process: Drivers are loaded before the sysctl service runs. In this case, affected network interfaces use the mode preset from the kernel, and sysctl defaults are ignored. For solution of this problem, see the Red Hat Knowledgebase article What is the difference between "all", "default" and a specific device in a sysctl parameter? . Drivers are loaded or reloaded after the sysctl service runs. In this case, it is possible that some sysctl.conf parameters are not used after reboot. These settings may not be available or they may return to defaults. For solution of this problem, see the Red Hat Knowledgebase article Some sysctl.conf parameters are not used after reboot, manually adjusting the settings works as expected . 20.4. Specifying a Configuration File The command line options and other options, which cannot be set on the command line, can be set in an optional configuration file. No configuration file is read by default, so it needs to be specified at runtime with the -f option. For example: A configuration file equivalent to the -i eth3 -m -S options shown above would look as follows: 20.5. Using the PTP Management Client The PTP management client, pmc , can be used to obtain additional information from ptp4l as follows: Setting the -b option to zero limits the boundary to the locally running ptp4l instance. A larger boundary value will retrieve the information also from PTP nodes further from the local clock. The retrievable information includes: stepsRemoved is the number of communication paths to the grandmaster clock. offsetFromMaster and master_offset is the last measured offset of the clock from the master in nanoseconds. meanPathDelay is the estimated delay of the synchronization messages sent from the master in nanoseconds. if gmPresent is true, the PTP clock is synchronized to a master, the local clock is not the grandmaster clock. gmIdentity is the grandmaster's identity. For a full list of pmc commands, type the following as root : Additional information is available in the pmc(8) man page. 20.6. Synchronizing the Clocks The phc2sys program is used to synchronize the system clock to the PTP hardware clock ( PHC ) on the NIC. The phc2sys service is configured in the /etc/sysconfig/phc2sys configuration file. The default setting in the /etc/sysconfig/phc2sys file is as follows: The -a option causes phc2sys to read the clocks to be synchronized from the ptp4l application. It will follow changes in the PTP port states, adjusting the synchronization between the NIC hardware clocks accordingly. The system clock is not synchronized, unless the -r option is also specified. If you want the system clock to be eligible to become a time source, specify the -r option twice. After making changes to /etc/sysconfig/phc2sys , restart the phc2sys service from the command line by issuing a command as root : Under normal circumstances, use systemctl commands to start, stop, and restart the phc2sys service. When you do not want to start phc2sys as a service, you can start it from the command line. 
For example, enter the following command as root : The -a option causes phc2sys to read the clocks to be synchronized from the ptp4l application. If you want the system clock to be eligible to become a time source, specify the -r option twice. Alternately, use the -s option to synchronize the system clock to a specific interface's PTP hardware clock. For example: The -w option waits for the running ptp4l application to synchronize the PTP clock and then retrieves the TAI to UTC offset from ptp4l . Normally, PTP operates in the International Atomic Time ( TAI ) timescale, while the system clock is kept in Coordinated Universal Time ( UTC ). The current offset between the TAI and UTC timescales is 36 seconds. The offset changes when leap seconds are inserted or deleted, which typically happens every few years. The -O option needs to be used to set this offset manually when the -w is not used, as follows: Once the phc2sys servo is in a locked state, the clock will not be stepped, unless the -S option is used. This means that the phc2sys program should be started after the ptp4l program has synchronized the PTP hardware clock. However, with -w , it is not necessary to start phc2sys after ptp4l as it will wait for it to synchronize the clock. The phc2sys program can also be started as a service by running: When running as a service, options are specified in the /etc/sysconfig/phc2sys file. More information on the different phc2sys options can be found in the phc2sys(8) man page. Note that the examples in this section assume the command is run on a slave system or slave port. 20.7. Verifying Time Synchronization When PTP time synchronization is working correctly, new messages with offsets and frequency adjustments are printed periodically to the ptp4l and phc2sys outputs if hardware time stamping is used. The output values converge shortly. You can see these messages in the /var/log/messages file. The following examples of the ptp4l and the phc2sys output contain: offset (in nanoseconds) frequency offset (in parts per billion (ppb)) path delay (in nanoseconds) Example of the ptp4l output: Example of the phc2sys output: To reduce the ptp4l output and print only the values, use the summary_interval directive. The summary_interval directive is specified as 2 to the power of n in seconds. For example, to reduce the output to every 1024 seconds, add the following line to the /etc/ptp4l.conf file: An example of the ptp4l output, with summary_interval set to 6: By default, summary_interval is set to 0, so messages are printed once per second, which is the maximum frequency. The messages are logged at the LOG_INFO level. To disable messages, use the -l option to set the maximum log level to 5 or lower: You can use the -u option to reduce the phc2sys output: Where summary-updates is the number of clock updates to include in summary statistics. An example follows: When used with these options, the interval for updating the statistics is set to 60 seconds ( -u ), phc2sys waits until ptp4l is in synchronized state ( -w ), and messages are printed to the standard output ( -m ). For further details about the phc2sys options, see the phc2sys(5) man page. The output includes: offset root mean square (rms) maximum absolute offset (max) frequency offset (freq): its mean, and standard deviation path delay (delay): its mean, and standard deviation 20.8. 
Serving PTP Time with NTP The ntpd daemon can be configured to distribute the time from the system clock synchronized by ptp4l or phc2sys by using the LOCAL reference clock driver. To prevent ntpd from adjusting the system clock, the ntp.conf file must not specify any NTP servers. The following is a minimal example of ntp.conf : Note When the DHCP client program, dhclient , receives a list of NTP servers from the DHCP server, it adds them to ntp.conf and restarts the service. To disable that feature, add PEERNTP=no to /etc/sysconfig/network . 20.9. Serving NTP Time with PTP NTP to PTP synchronization in the opposite direction is also possible. When ntpd is used to synchronize the system clock, ptp4l can be configured with the priority1 option (or other clock options included in the best master clock algorithm) to be the grandmaster clock and distribute the time from the system clock via PTP : With hardware time stamping, phc2sys needs to be used to synchronize the PTP hardware clock to the system clock. If running phc2sys as a service, edit the /etc/sysconfig/phc2sys configuration file. The default setting in the /etc/sysconfig/phc2sys file is as follows: As root , edit that line as follows: The -r option is used twice here to allow synchronization of the PTP hardware clock on the NIC from the system clock. Restart the phc2sys service for the changes to take effect: To prevent quick changes in the PTP clock's frequency, the synchronization to the system clock can be loosened by using smaller P (proportional) and I (integral) constants for the PI servo: 20.10. Synchronize to PTP or NTP Time Using timemaster When there are multiple PTP domains available on the network, or fallback to NTP is needed, the timemaster program can be used to synchronize the system clock to all available time sources. The PTP time is provided by phc2sys and ptp4l via shared memory driver ( SHM ) reference clocks to chronyd or ntpd (depending on the NTP daemon that has been configured on the system). The NTP daemon can then compare all time sources, both PTP and NTP , and use the best sources to synchronize the system clock. On start, timemaster reads a configuration file that specifies the NTP and PTP time sources, checks which network interfaces have their own or share a PTP hardware clock (PHC), generates configuration files for ptp4l and chronyd or ntpd , and starts the ptp4l , phc2sys , and chronyd or ntpd processes as needed. It will remove the generated configuration files on exit. It writes configuration files for chronyd , ntpd , and ptp4l to /var/run/timemaster/ . 20.10.1. Starting timemaster as a Service To start timemaster as a service, issue the following command as root : This will read the options in /etc/timemaster.conf . For more information on managing system services in Red Hat Enterprise Linux 7, see Chapter 10, Managing Services with systemd . 20.10.2. Understanding the timemaster Configuration File Red Hat Enterprise Linux provides a default /etc/timemaster.conf file with a number of sections containing default options. The section headings are enclosed in brackets. To view the default configuration, issue a command as follows: Notice the section named as follows: This is an example of an NTP server section; "ntp-server.local" is an example of a host name for an NTP server on the local LAN. Add more sections as required using a host name or IP address as part of the section name.
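As an illustration of such an additional section, the following sketch adds an NTP server reachable by IP address; the address 192.168.2.20 and the polling values are assumptions for this example, not part of the default configuration:
[ntp_server 192.168.2.20]
minpoll 4
maxpoll 4
The section heading carries the host name or IP address of the server and the section body carries the polling options for that server, in the same way as the ntp-server.local example above.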
Note that the short polling values in these example sections are not suitable for a public server; see Chapter 19, Configuring NTP Using ntpd for an explanation of suitable minpoll and maxpoll values. Notice the section named as follows: A "PTP domain" is a group of one or more PTP clocks that synchronize to each other. They may or may not be synchronized to clocks in another domain. Clocks that are configured with the same domain number make up the domain. This includes a PTP grandmaster clock. The domain number in each "PTP domain" section needs to correspond to one of the PTP domains configured on the network. An instance of ptp4l is started for every interface which has its own PTP clock, and hardware time stamping is enabled automatically. Interfaces that support hardware time stamping have a PTP clock (PHC) attached; however, it is possible for a group of interfaces on a NIC to share a PHC. A separate ptp4l instance will be started for each group of interfaces sharing the same PHC and for each interface that supports only software time stamping. All ptp4l instances are configured to run as a slave. If an interface with hardware time stamping is specified in more than one PTP domain, then only the first ptp4l instance created will have hardware time stamping enabled. Notice the section named as follows: The default timemaster configuration includes the system ntpd and chrony configuration ( /etc/ntp.conf or /etc/chrony.conf ) in order to include the configuration of access restrictions and authentication keys. That means any NTP servers specified there will be used with timemaster too. The section headings are as follows: [ntp_server ntp-server.local] - Specify polling intervals for this server. Create additional sections as required. Include the host name or IP address in the section heading. [ptp_domain 0] - Specify interfaces that have PTP clocks configured for this domain. Create additional sections with the appropriate domain number, as required. [timemaster] - Specify the NTP daemon to be used. Possible values are chronyd and ntpd . [chrony.conf] - Specify any additional settings to be copied to the configuration file generated for chronyd . [ntp.conf] - Specify any additional settings to be copied to the configuration file generated for ntpd . [ptp4l.conf] - Specify options to be copied to the configuration file generated for ptp4l . [chronyd] - Specify any additional settings to be passed on the command line to chronyd . [ntpd] - Specify any additional settings to be passed on the command line to ntpd . [phc2sys] - Specify any additional settings to be passed on the command line to phc2sys . [ptp4l] - Specify any additional settings to be passed on the command line to all instances of ptp4l . The section headings and their contents are explained in detail in the timemaster(8) manual page. 20.10.3. Configuring timemaster Options Editing the timemaster Configuration File To change the default configuration, open the /etc/timemaster.conf file for editing as root : For each NTP server you want to control using timemaster , create [ntp_server address ] sections. Note that the short polling values in the example section are not suitable for a public server; see Chapter 19, Configuring NTP Using ntpd for an explanation of suitable minpoll and maxpoll values. To add interfaces that should be used in a domain, edit the #[ptp_domain 0] section and add the interfaces. Create additional domains as required.
For example: If you are required to use ntpd as the NTP daemon on this system, change the default entry in the [timemaster] section from chronyd to ntpd . See Chapter 18, Configuring NTP Using the chrony Suite for information on the differences between ntpd and chronyd. If using chronyd as the NTP server on this system, add any additional options below the default include /etc/chrony.conf entry in the [chrony.conf] section. Edit the default include entry if the path to /etc/chrony.conf is known to have changed. If using ntpd as the NTP server on this system, add any additional options below the default include /etc/ntp.conf entry in the [ntp.conf] section. Edit the default include entry if the path to /etc/ntp.conf is known to have changed. In the [ptp4l.conf] section, add any options to be copied to the configuration file generated for ptp4l . This chapter documents common options and more information is available in the ptp4l(8) manual page. In the [chronyd] section, add any command line options to be passed to chronyd when called by timemaster . See Chapter 18, Configuring NTP Using the chrony Suite for information on using chronyd . In the [ntpd] section, add any command line options to be passed to ntpd when called by timemaster . See Chapter 19, Configuring NTP Using ntpd for information on using ntpd . In the [phc2sys] section, add any command line options to be passed to phc2sys when called by timemaster . This chapter documents common options and more information is available in the phc2sys(8) manual page. In the [ptp4l] section, add any command line options to be passed to ptp4l when called by timemaster . This chapter documents common options and more information is available in the ptp4l(8) manual page. Save the configuration file and restart timemaster by issuing the following command as root : 20.11. Improving Accuracy Previously, test results indicated that disabling the tickless kernel capability could significantly improve the stability of the system clock, and thus improve the PTP synchronization accuracy (at the cost of increased power consumption). The kernel tickless mode can be disabled by adding nohz=off to the kernel boot option parameters. However, recent improvements applied to kernel-3.10.0-197.el7 have greatly improved the stability of the system clock and the difference in stability of the clock with and without nohz=off should be much smaller now for most users. The ptp4l and phc2sys applications can be configured to use a new adaptive servo. The advantage over the PI servo is that it does not require configuration of the PI constants to perform well. To make use of this for ptp4l , add the following line to the /etc/ptp4l.conf file: After making changes to /etc/ptp4l.conf , restart the ptp4l service from the command line by issuing the following command as root : To make use of this for phc2sys , add the following line to the /etc/sysconfig/phc2sys file: After making changes to /etc/sysconfig/phc2sys , restart the phc2sys service from the command line by issuing the following command as root : 20.12. Additional Resources The following sources of information provide additional resources regarding PTP and the ptp4l tools. 20.12.1. Installed Documentation ptp4l(8) man page - Describes ptp4l options including the format of the configuration file. pmc(8) man page - Describes the PTP management client and its command options. phc2sys(8) man page - Describes a tool for synchronizing the system clock to a PTP hardware clock (PHC).
timemaster(8) man page - Describes a program that uses ptp4l and phc2sys to synchronize the system clock using chronyd or ntpd . 20.12.2. Useful Websites http://www.nist.gov/el/isd/ieee/ieee1588.cfm The IEEE 1588 Standard.
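As a recap of the pieces described in this chapter, the following sketch shows the two service configuration files involved in running an ordinary PTP slave with hardware time stamping; the interface name eth3 follows the earlier examples and is an assumption of this sketch:
# /etc/sysconfig/ptp4l - ptp4l reads /etc/ptp4l.conf and runs on the eth3 interface
OPTIONS="-f /etc/ptp4l.conf -i eth3"
# /etc/sysconfig/phc2sys - synchronize the system clock to the PTP hardware clock (the default setting)
OPTIONS="-a -r"
With these files in place, the ptp4l and phc2sys services are started with systemctl as described in Section 20.2.3, "Starting ptp4l" and Section 20.6, "Synchronizing the Clocks" , and synchronization can then be checked in /var/log/messages as described in Section 20.7, "Verifying Time Synchronization" .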
[ "~]# ethtool -T eth3 Time stamping parameters for eth3: Capabilities: hardware-transmit (SOF_TIMESTAMPING_TX_HARDWARE) software-transmit (SOF_TIMESTAMPING_TX_SOFTWARE) hardware-receive (SOF_TIMESTAMPING_RX_HARDWARE) software-receive (SOF_TIMESTAMPING_RX_SOFTWARE) software-system-clock (SOF_TIMESTAMPING_SOFTWARE) hardware-raw-clock (SOF_TIMESTAMPING_RAW_HARDWARE) PTP Hardware Clock: 0 Hardware Transmit Timestamp Modes: off (HWTSTAMP_TX_OFF) on (HWTSTAMP_TX_ON) Hardware Receive Filter Modes: none (HWTSTAMP_FILTER_NONE) all (HWTSTAMP_FILTER_ALL)", "~]# yum install linuxptp", "~]# systemctl start ptp4l", "~]# ptp4l -i eth3 -m", "~]# ptp4l -i eth3 -m selected eth3 as PTP clock port 1: INITIALIZING to LISTENING on INITIALIZE port 0: INITIALIZING to LISTENING on INITIALIZE port 1: new foreign master 00a069.fffe.0b552d-1 selected best master clock 00a069.fffe.0b552d port 1: LISTENING to UNCALIBRATED on RS_SLAVE master offset -23947 s0 freq +0 path delay 11350 master offset -28867 s0 freq +0 path delay 11236 master offset -32801 s0 freq +0 path delay 10841 master offset -37203 s1 freq +0 path delay 10583 master offset -7275 s2 freq -30575 path delay 10583 port 1: UNCALIBRATED to SLAVE on MASTER_CLOCK_SELECTED master offset -4552 s2 freq -30035 path delay 10385", "~]# ptp4l -i eth3 -m -S", "~]# sysctl -w net.ipv4.conf.default.rp_filter=2 ~]# sysctl -w net.ipv4.conf.all.rp_filter=2", "~]# sysctl -w net.ipv4.conf.em1.rp_filter=2", "net.ipv4.conf.all.rp_filter=2", "net.ipv4.conf. interface .rp_filter=2", "~]# ptp4l -f /etc/ptp4l.conf", "~]# cat /etc/ptp4l.conf [global] verbose 1 time_stamping software [eth3]", "~]# pmc -u -b 0 'GET CURRENT_DATA_SET' sending: GET CURRENT_DATA_SET 90e2ba.fffe.20c7f8-0 seq 0 RESPONSE MANAGMENT CURRENT_DATA_SET stepsRemoved 1 offsetFromMaster -142.0 meanPathDelay 9310.0", "~]# pmc -u -b 0 'GET TIME_STATUS_NP' sending: GET TIME_STATUS_NP 90e2ba.fffe.20c7f8-0 seq 0 RESPONSE MANAGMENT TIME_STATUS_NP master_offset 310 ingress_time 1361545089345029441 cumulativeScaledRateOffset +1.000000000 scaledLastGmPhaseChange 0 gmTimeBaseIndicator 0 lastGmPhaseChange 0x0000'0000000000000000.0000 gmPresent true gmIdentity 00a069.fffe.0b552d", "~]# pmc help", "OPTIONS=\"-a -r\"", "~]# systemctl restart phc2sys", "~]# phc2sys -a -r", "~]# phc2sys -s eth3 -w", "~]# phc2sys -s eth3 -O -36", "~]# systemctl start phc2sys", "ptp4l[352.359]: selected /dev/ptp0 as PTP clock ptp4l[352.361]: port 1: INITIALIZING to LISTENING on INITIALIZE ptp4l[352.361]: port 0: INITIALIZING to LISTENING on INITIALIZE ptp4l[353.210]: port 1: new foreign master 00a069.fffe.0b552d-1 ptp4l[357.214]: selected best master clock 00a069.fffe.0b552d ptp4l[357.214]: port 1: LISTENING to UNCALIBRATED on RS_SLAVE ptp4l[359.224]: master offset 3304 s0 freq +0 path delay 9202 ptp4l[360.224]: master offset 3708 s1 freq -29492 path delay 9202 ptp4l[361.224]: master offset -3145 s2 freq -32637 path delay 9202 ptp4l[361.224]: port 1: UNCALIBRATED to SLAVE on MASTER_CLOCK_SELECTED ptp4l[362.223]: master offset -145 s2 freq -30580 path delay 9202 ptp4l[363.223]: master offset 1043 s2 freq -29436 path delay 8972 ptp4l[364.223]: master offset 266 s2 freq -29900 path delay 9153 ptp4l[365.223]: master offset 430 s2 freq -29656 path delay 9153 ptp4l[366.223]: master offset 615 s2 freq -29342 path delay 9169 ptp4l[367.222]: master offset -191 s2 freq -29964 path delay 9169 ptp4l[368.223]: master offset 466 s2 freq -29364 path delay 9170 ptp4l[369.235]: master offset 24 s2 freq -29666 path delay 9196 ptp4l[370.235]: master offset -375 s2 
freq -30058 path delay 9238 ptp4l[371.235]: master offset 285 s2 freq -29511 path delay 9199 ptp4l[372.235]: master offset -78 s2 freq -29788 path delay 9204", "phc2sys[526.527]: Waiting for ptp4l phc2sys[527.528]: Waiting for ptp4l phc2sys[528.528]: phc offset 55341 s0 freq +0 delay 2729 phc2sys[529.528]: phc offset 54658 s1 freq -37690 delay 2725 phc2sys[530.528]: phc offset 888 s2 freq -36802 delay 2756 phc2sys[531.528]: phc offset 1156 s2 freq -36268 delay 2766 phc2sys[532.528]: phc offset 411 s2 freq -36666 delay 2738 phc2sys[533.528]: phc offset -73 s2 freq -37026 delay 2764 phc2sys[534.528]: phc offset 39 s2 freq -36936 delay 2746 phc2sys[535.529]: phc offset 95 s2 freq -36869 delay 2733 phc2sys[536.529]: phc offset -359 s2 freq -37294 delay 2738 phc2sys[537.529]: phc offset -257 s2 freq -37300 delay 2753 phc2sys[538.529]: phc offset 119 s2 freq -37001 delay 2745 phc2sys[539.529]: phc offset 288 s2 freq -36796 delay 2766 phc2sys[540.529]: phc offset -149 s2 freq -37147 delay 2760 phc2sys[541.529]: phc offset -352 s2 freq -37395 delay 2771 phc2sys[542.529]: phc offset 166 s2 freq -36982 delay 2748 phc2sys[543.529]: phc offset 50 s2 freq -37048 delay 2756 phc2sys[544.530]: phc offset -31 s2 freq -37114 delay 2748 phc2sys[545.530]: phc offset -333 s2 freq -37426 delay 2747 phc2sys[546.530]: phc offset 194 s2 freq -36999 delay 2749", "summary_interval 10", "ptp4l: [615.253] selected /dev/ptp0 as PTP clock ptp4l: [615.255] port 1: INITIALIZING to LISTENING on INITIALIZE ptp4l: [615.255] port 0: INITIALIZING to LISTENING on INITIALIZE ptp4l: [615.564] port 1: new foreign master 00a069.fffe.0b552d-1 ptp4l: [619.574] selected best master clock 00a069.fffe.0b552d ptp4l: [619.574] port 1: LISTENING to UNCALIBRATED on RS_SLAVE ptp4l: [623.573] port 1: UNCALIBRATED to SLAVE on MASTER_CLOCK_SELECTED ptp4l: [684.649] rms 669 max 3691 freq -29383 +/- 3735 delay 9232 +/- 122 ptp4l: [748.724] rms 253 max 588 freq -29787 +/- 221 delay 9219 +/- 158 ptp4l: [812.793] rms 287 max 673 freq -29802 +/- 248 delay 9211 +/- 183 ptp4l: [876.853] rms 226 max 534 freq -29795 +/- 197 delay 9221 +/- 138 ptp4l: [940.925] rms 250 max 562 freq -29801 +/- 218 delay 9199 +/- 148 ptp4l: [1004.988] rms 226 max 525 freq -29802 +/- 196 delay 9228 +/- 143 ptp4l: [1069.065] rms 300 max 646 freq -29802 +/- 259 delay 9214 +/- 176 ptp4l: [1133.125] rms 226 max 505 freq -29792 +/- 197 delay 9225 +/- 159 ptp4l: [1197.185] rms 244 max 688 freq -29790 +/- 211 delay 9201 +/- 162", "~]# phc2sys -l 5", "~]# phc2sys -u summary-updates", "~]# phc2sys -s eth3 -w -m -u 60 phc2sys[700.948]: rms 1837 max 10123 freq -36474 +/- 4752 delay 2752 +/- 16 phc2sys[760.954]: rms 194 max 457 freq -37084 +/- 174 delay 2753 +/- 12 phc2sys[820.963]: rms 211 max 487 freq -37085 +/- 185 delay 2750 +/- 19 phc2sys[880.968]: rms 183 max 440 freq -37102 +/- 164 delay 2734 +/- 91 phc2sys[940.973]: rms 244 max 584 freq -37095 +/- 216 delay 2748 +/- 16 phc2sys[1000.979]: rms 220 max 573 freq -36666 +/- 182 delay 2747 +/- 43 phc2sys[1060.984]: rms 266 max 675 freq -36759 +/- 234 delay 2753 +/- 17", "~]# cat /etc/ntp.conf server 127.127.1.0 fudge 127.127.1.0 stratum 0", "~]# cat /etc/ptp4l.conf [global] priority1 127 ptp4l -f /etc/ptp4l.conf", "OPTIONS=\"-a -r\"", "~]# vi /etc/sysconfig/phc2sys OPTIONS=\"-a -r -r\"", "~]# systemctl restart phc2sys", "~]# phc2sys -a -r -r -P 0.01 -I 0.0001", "~]# systemctl start timemaster", "~]USD less /etc/timemaster.conf Configuration file for timemaster #[ntp_server ntp-server.local] #minpoll 4 #maxpoll 4 #[ptp_domain 0] 
#interfaces eth0 [timemaster] ntp_program chronyd [chrony.conf] include /etc/chrony.conf [ntp.conf] includefile /etc/ntp.conf [ptp4l.conf] [chronyd] path /usr/sbin/chronyd options -u chrony [ntpd] path /usr/sbin/ntpd options -u ntp:ntp -g [phc2sys] path /usr/sbin/phc2sys [ptp4l] path /usr/sbin/ptp4l", "[ntp_server address ]", "[ptp_domain number ]", "[timemaster]", "~]# vi /etc/timemaster.conf", "[ptp_domain 0] interfaces eth0 [ptp_domain 1] interfaces eth1", "~]# systemctl restart timemaster", "clock_servo linreg", "~]# systemctl restart ptp4l", "-E linreg", "~]# systemctl restart phc2sys" ]
https://docs.redhat.com/en/documentation/Red_Hat_Enterprise_Linux/7/html/system_administrators_guide/ch-Configuring_PTP_Using_ptp4l
Installing on IBM Power
Installing on IBM Power OpenShift Container Platform 4.12 Installing OpenShift Container Platform on IBM Power Red Hat OpenShift Documentation Team
[ "USDTTL 1W @ IN SOA ns1.example.com. root ( 2019070700 ; serial 3H ; refresh (3 hours) 30M ; retry (30 minutes) 2W ; expiry (2 weeks) 1W ) ; minimum (1 week) IN NS ns1.example.com. IN MX 10 smtp.example.com. ; ; ns1.example.com. IN A 192.168.1.5 smtp.example.com. IN A 192.168.1.5 ; helper.example.com. IN A 192.168.1.5 helper.ocp4.example.com. IN A 192.168.1.5 ; api.ocp4.example.com. IN A 192.168.1.5 1 api-int.ocp4.example.com. IN A 192.168.1.5 2 ; *.apps.ocp4.example.com. IN A 192.168.1.5 3 ; bootstrap.ocp4.example.com. IN A 192.168.1.96 4 ; control-plane0.ocp4.example.com. IN A 192.168.1.97 5 control-plane1.ocp4.example.com. IN A 192.168.1.98 6 control-plane2.ocp4.example.com. IN A 192.168.1.99 7 ; compute0.ocp4.example.com. IN A 192.168.1.11 8 compute1.ocp4.example.com. IN A 192.168.1.7 9 ; ;EOF", "USDTTL 1W @ IN SOA ns1.example.com. root ( 2019070700 ; serial 3H ; refresh (3 hours) 30M ; retry (30 minutes) 2W ; expiry (2 weeks) 1W ) ; minimum (1 week) IN NS ns1.example.com. ; 5.1.168.192.in-addr.arpa. IN PTR api.ocp4.example.com. 1 5.1.168.192.in-addr.arpa. IN PTR api-int.ocp4.example.com. 2 ; 96.1.168.192.in-addr.arpa. IN PTR bootstrap.ocp4.example.com. 3 ; 97.1.168.192.in-addr.arpa. IN PTR control-plane0.ocp4.example.com. 4 98.1.168.192.in-addr.arpa. IN PTR control-plane1.ocp4.example.com. 5 99.1.168.192.in-addr.arpa. IN PTR control-plane2.ocp4.example.com. 6 ; 11.1.168.192.in-addr.arpa. IN PTR compute0.ocp4.example.com. 7 7.1.168.192.in-addr.arpa. IN PTR compute1.ocp4.example.com. 8 ; ;EOF", "global log 127.0.0.1 local2 pidfile /var/run/haproxy.pid maxconn 4000 daemon defaults mode http log global option dontlognull option http-server-close option redispatch retries 3 timeout http-request 10s timeout queue 1m timeout connect 10s timeout client 1m timeout server 1m timeout http-keep-alive 10s timeout check 10s maxconn 3000 listen api-server-6443 1 bind *:6443 mode tcp option httpchk GET /readyz HTTP/1.0 option log-health-checks balance roundrobin server bootstrap bootstrap.ocp4.example.com:6443 verify none check check-ssl inter 10s fall 2 rise 3 backup 2 server master0 master0.ocp4.example.com:6443 weight 1 verify none check check-ssl inter 10s fall 2 rise 3 server master1 master1.ocp4.example.com:6443 weight 1 verify none check check-ssl inter 10s fall 2 rise 3 server master2 master2.ocp4.example.com:6443 weight 1 verify none check check-ssl inter 10s fall 2 rise 3 listen machine-config-server-22623 3 bind *:22623 mode tcp server bootstrap bootstrap.ocp4.example.com:22623 check inter 1s backup 4 server master0 master0.ocp4.example.com:22623 check inter 1s server master1 master1.ocp4.example.com:22623 check inter 1s server master2 master2.ocp4.example.com:22623 check inter 1s listen ingress-router-443 5 bind *:443 mode tcp balance source server worker0 worker0.ocp4.example.com:443 check inter 1s server worker1 worker1.ocp4.example.com:443 check inter 1s listen ingress-router-80 6 bind *:80 mode tcp balance source server worker0 worker0.ocp4.example.com:80 check inter 1s server worker1 worker1.ocp4.example.com:80 check inter 1s", "dig +noall +answer @<nameserver_ip> api.<cluster_name>.<base_domain> 1", "api.ocp4.example.com. 604800 IN A 192.168.1.5", "dig +noall +answer @<nameserver_ip> api-int.<cluster_name>.<base_domain>", "api-int.ocp4.example.com. 604800 IN A 192.168.1.5", "dig +noall +answer @<nameserver_ip> random.apps.<cluster_name>.<base_domain>", "random.apps.ocp4.example.com. 
604800 IN A 192.168.1.5", "dig +noall +answer @<nameserver_ip> console-openshift-console.apps.<cluster_name>.<base_domain>", "console-openshift-console.apps.ocp4.example.com. 604800 IN A 192.168.1.5", "dig +noall +answer @<nameserver_ip> bootstrap.<cluster_name>.<base_domain>", "bootstrap.ocp4.example.com. 604800 IN A 192.168.1.96", "dig +noall +answer @<nameserver_ip> -x 192.168.1.5", "5.1.168.192.in-addr.arpa. 604800 IN PTR api-int.ocp4.example.com. 1 5.1.168.192.in-addr.arpa. 604800 IN PTR api.ocp4.example.com. 2", "dig +noall +answer @<nameserver_ip> -x 192.168.1.96", "96.1.168.192.in-addr.arpa. 604800 IN PTR bootstrap.ocp4.example.com.", "ssh-keygen -t ed25519 -N '' -f <path>/<file_name> 1", "cat <path>/<file_name>.pub", "cat ~/.ssh/id_ed25519.pub", "eval \"USD(ssh-agent -s)\"", "Agent pid 31874", "ssh-add <path>/<file_name> 1", "Identity added: /home/<you>/<path>/<file_name> (<computer_name>)", "tar -xvf openshift-install-linux.tar.gz", "tar xvf <file>", "echo USDPATH", "oc <command>", "C:\\> path", "C:\\> oc <command>", "echo USDPATH", "oc <command>", "mkdir <installation_directory>", "{ \"auths\":{ \"cloud.openshift.com\":{ \"auth\":\"b3Blb=\", \"email\":\"[email protected]\" }, \"quay.io\":{ \"auth\":\"b3Blb=\", \"email\":\"[email protected]\" } } }", "networking: clusterNetwork: - cidr: 10.128.0.0/14 hostPrefix: 23 - cidr: fd00:10:128::/56 hostPrefix: 64 serviceNetwork: - 172.30.0.0/16 - fd00:172:16::/112", "networking: clusterNetwork: - cidr: 10.128.0.0/14 hostPrefix: 23", "networking: serviceNetwork: - 172.30.0.0/16", "networking: machineNetwork: - cidr: 10.0.0.0/16", "apiVersion: v1 baseDomain: example.com 1 compute: 2 - hyperthreading: Enabled 3 name: worker replicas: 0 4 architecture: ppc64le controlPlane: 5 hyperthreading: Enabled 6 name: master replicas: 3 7 architecture: ppc64le metadata: name: test 8 networking: clusterNetwork: - cidr: 10.128.0.0/14 9 hostPrefix: 23 10 networkType: OVNKubernetes 11 serviceNetwork: 12 - 172.30.0.0/16 platform: none: {} 13 fips: false 14 pullSecret: '{\"auths\": ...}' 15 sshKey: 'ssh-ed25519 AAAA...' 16", "apiVersion: v1 baseDomain: my.domain.com proxy: httpProxy: http://<username>:<pswd>@<ip>:<port> 1 httpsProxy: https://<username>:<pswd>@<ip>:<port> 2 noProxy: example.com 3 additionalTrustBundle: | 4 -----BEGIN CERTIFICATE----- <MY_TRUSTED_CA_CERT> -----END CERTIFICATE----- additionalTrustBundlePolicy: <policy_to_add_additionalTrustBundle> 5", "./openshift-install wait-for install-complete --log-level debug", "compute: - name: worker platform: {} replicas: 0", "spec: clusterNetwork: - cidr: 10.128.0.0/19 hostPrefix: 23 - cidr: 10.128.32.0/19 hostPrefix: 23", "spec: serviceNetwork: - 172.30.0.0/14", "defaultNetwork: type: OpenShiftSDN openshiftSDNConfig: mode: NetworkPolicy mtu: 1450 vxlanPort: 4789", "defaultNetwork: type: OVNKubernetes ovnKubernetesConfig: mtu: 1400 genevePort: 6081 ipsecConfig: {}", "kubeProxyConfig: proxyArguments: iptables-min-sync-period: - 0s", "./openshift-install create manifests --dir <installation_directory> 1", "./openshift-install create ignition-configs --dir <installation_directory> 1", ". 
├── auth │ ├── kubeadmin-password │ └── kubeconfig ├── bootstrap.ign ├── master.ign ├── metadata.json └── worker.ign", "sha512sum <installation_directory>/bootstrap.ign", "curl -k http://<HTTP_server>/bootstrap.ign 1", "% Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0{\"ignition\":{\"version\":\"3.2.0\"},\"passwd\":{\"users\":[{\"name\":\"core\",\"sshAuthorizedKeys\":[\"ssh-rsa", "openshift-install coreos print-stream-json | grep '\\.iso[^.]'", "\"location\": \"<url>/art/storage/releases/rhcos-4.12-aarch64/<release>/aarch64/rhcos-<release>-live.aarch64.iso\", \"location\": \"<url>/art/storage/releases/rhcos-4.12-ppc64le/<release>/ppc64le/rhcos-<release>-live.ppc64le.iso\", \"location\": \"<url>/art/storage/releases/rhcos-4.12-s390x/<release>/s390x/rhcos-<release>-live.s390x.iso\", \"location\": \"<url>/art/storage/releases/rhcos-4.12/<release>/x86_64/rhcos-<release>-live.x86_64.iso\",", "sudo coreos-installer install --ignition-url=http://<HTTP_server>/<node_type>.ign <device> --ignition-hash=sha512-<digest> 1 2", "sudo coreos-installer install --ignition-url=http://192.168.1.2:80/installation_directory/bootstrap.ign /dev/sda --ignition-hash=sha512-a5a2d43879223273c9b60af66b44202a1d1248fc01cf156c46d4a79f552b6bad47bc8cc78ddf0116e80c59d2ea9e32ba53bc807afbca581aa059311def2c3e3b", "Ignition: ran on 2022/03/14 14:48:33 UTC (this boot) Ignition: user-provided config was applied", "ip=10.10.10.2::10.10.10.254:255.255.255.0:core0.example.com:enp1s0:none nameserver=4.4.4.41", "ip=10.10.10.2::10.10.10.254:255.255.255.0::enp1s0:none nameserver=4.4.4.41", "ip=10.10.10.2::10.10.10.254:255.255.255.0:core0.example.com:enp1s0:none ip=10.10.10.3::10.10.10.254:255.255.255.0:core0.example.com:enp2s0:none", "ip=::10.10.10.254::::", "rd.route=20.20.20.0/24:20.20.20.254:enp2s0", "ip=10.10.10.2::10.10.10.254:255.255.255.0:core0.example.com:enp1s0:none ip=::::core0.example.com:enp2s0:none", "ip=enp1s0:dhcp ip=10.10.10.2::10.10.10.254:255.255.255.0:core0.example.com:enp2s0:none", "ip=10.10.10.2::10.10.10.254:255.255.255.0:core0.example.com:enp2s0.100:none vlan=enp2s0.100:enp2s0", "ip=enp2s0.100:dhcp vlan=enp2s0.100:enp2s0", "nameserver=1.1.1.1 nameserver=8.8.8.8", "bond=bond0:em1,em2:mode=active-backup ip=bond0:dhcp", "bond=bond0:em1,em2:mode=active-backup ip=10.10.10.2::10.10.10.254:255.255.255.0:core0.example.com:bond0:none", "ip=bond0.100:dhcp bond=bond0:em1,em2:mode=active-backup vlan=bond0.100:bond0", "ip=10.10.10.2::10.10.10.254:255.255.255.0:core0.example.com:bond0.100:none bond=bond0:em1,em2:mode=active-backup vlan=bond0.100:bond0", "team=team0:em1,em2 ip=team0:dhcp", "curl -k http://<HTTP_server>/bootstrap.ign 1", "% Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0{\"ignition\":{\"version\":\"3.2.0\"},\"passwd\":{\"users\":[{\"name\":\"core\",\"sshAuthorizedKeys\":[\"ssh-rsa", "openshift-install coreos print-stream-json | grep -Eo '\"https.*(kernel-|initramfs.|rootfs.)\\w+(\\.img)?\"'", "\"<url>/art/storage/releases/rhcos-4.12-aarch64/<release>/aarch64/rhcos-<release>-live-kernel-aarch64\" \"<url>/art/storage/releases/rhcos-4.12-aarch64/<release>/aarch64/rhcos-<release>-live-initramfs.aarch64.img\" \"<url>/art/storage/releases/rhcos-4.12-aarch64/<release>/aarch64/rhcos-<release>-live-rootfs.aarch64.img\" \"<url>/art/storage/releases/rhcos-4.12-ppc64le/49.84.202110081256-0/ppc64le/rhcos-<release>-live-kernel-ppc64le\" 
\"<url>/art/storage/releases/rhcos-4.12-ppc64le/<release>/ppc64le/rhcos-<release>-live-initramfs.ppc64le.img\" \"<url>/art/storage/releases/rhcos-4.12-ppc64le/<release>/ppc64le/rhcos-<release>-live-rootfs.ppc64le.img\" \"<url>/art/storage/releases/rhcos-4.12-s390x/<release>/s390x/rhcos-<release>-live-kernel-s390x\" \"<url>/art/storage/releases/rhcos-4.12-s390x/<release>/s390x/rhcos-<release>-live-initramfs.s390x.img\" \"<url>/art/storage/releases/rhcos-4.12-s390x/<release>/s390x/rhcos-<release>-live-rootfs.s390x.img\" \"<url>/art/storage/releases/rhcos-4.12/<release>/x86_64/rhcos-<release>-live-kernel-x86_64\" \"<url>/art/storage/releases/rhcos-4.12/<release>/x86_64/rhcos-<release>-live-initramfs.x86_64.img\" \"<url>/art/storage/releases/rhcos-4.12/<release>/x86_64/rhcos-<release>-live-rootfs.x86_64.img\"", "DEFAULT pxeboot TIMEOUT 20 PROMPT 0 LABEL pxeboot KERNEL http://<HTTP_server>/rhcos-<version>-live-kernel-<architecture> 1 APPEND initrd=http://<HTTP_server>/rhcos-<version>-live-initramfs.<architecture>.img coreos.live.rootfs_url=http://<HTTP_server>/rhcos-<version>-live-rootfs.<architecture>.img coreos.inst.install_dev=/dev/sda coreos.inst.ignition_url=http://<HTTP_server>/bootstrap.ign 2 3", "Ignition: ran on 2022/03/14 14:48:33 UTC (this boot) Ignition: user-provided config was applied", "./openshift-install create manifests --dir <installation_directory>", "apiVersion: machineconfiguration.openshift.io/v1 kind: MachineConfig metadata: labels: machineconfiguration.openshift.io/role: \"master\" name: 99-master-kargs-mpath spec: kernelArguments: - 'rd.multipath=default' - 'root=/dev/disk/by-label/dm-mpath-root'", "apiVersion: machineconfiguration.openshift.io/v1 kind: MachineConfig metadata: labels: machineconfiguration.openshift.io/role: \"worker\" name: 99-worker-kargs-mpath spec: kernelArguments: - 'rd.multipath=default' - 'root=/dev/disk/by-label/dm-mpath-root'", "bootlist -m normal -o sda", "bootlist -m normal -o /dev/sdc /dev/sdd /dev/sde sdc sdd sde", "./openshift-install --dir <installation_directory> wait-for bootstrap-complete \\ 1 --log-level=info 2", "INFO Waiting up to 30m0s for the Kubernetes API at https://api.test.example.com:6443 INFO API v1.25.0 up INFO Waiting up to 30m0s for bootstrapping to complete INFO It is now safe to remove the bootstrap resources", "export KUBECONFIG=<installation_directory>/auth/kubeconfig 1", "oc whoami", "system:admin", "oc get nodes", "NAME STATUS ROLES AGE VERSION master-0 Ready master 63m v1.25.0 master-1 Ready master 63m v1.25.0 master-2 Ready master 64m v1.25.0", "oc get csr", "NAME AGE REQUESTOR CONDITION csr-8b2br 15m system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Pending csr-8vnps 15m system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Pending", "oc adm certificate approve <csr_name> 1", "oc get csr -o go-template='{{range .items}}{{if not .status}}{{.metadata.name}}{{\"\\n\"}}{{end}}{{end}}' | xargs --no-run-if-empty oc adm certificate approve", "oc get csr", "NAME AGE REQUESTOR CONDITION csr-bfd72 5m26s system:node:ip-10-0-50-126.us-east-2.compute.internal Pending csr-c57lv 5m26s system:node:ip-10-0-95-157.us-east-2.compute.internal Pending", "oc adm certificate approve <csr_name> 1", "oc get csr -o go-template='{{range .items}}{{if not .status}}{{.metadata.name}}{{\"\\n\"}}{{end}}{{end}}' | xargs oc adm certificate approve", "oc get nodes", "NAME STATUS ROLES AGE VERSION master-0 Ready master 73m v1.25.0 master-1 Ready master 73m v1.25.0 master-2 Ready master 74m v1.25.0 
worker-0 Ready worker 11m v1.25.0 worker-1 Ready worker 11m v1.25.0", "watch -n5 oc get clusteroperators", "NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE authentication 4.12.0 True False False 19m baremetal 4.12.0 True False False 37m cloud-credential 4.12.0 True False False 40m cluster-autoscaler 4.12.0 True False False 37m config-operator 4.12.0 True False False 38m console 4.12.0 True False False 26m csi-snapshot-controller 4.12.0 True False False 37m dns 4.12.0 True False False 37m etcd 4.12.0 True False False 36m image-registry 4.12.0 True False False 31m ingress 4.12.0 True False False 30m insights 4.12.0 True False False 31m kube-apiserver 4.12.0 True False False 26m kube-controller-manager 4.12.0 True False False 36m kube-scheduler 4.12.0 True False False 36m kube-storage-version-migrator 4.12.0 True False False 37m machine-api 4.12.0 True False False 29m machine-approver 4.12.0 True False False 37m machine-config 4.12.0 True False False 36m marketplace 4.12.0 True False False 37m monitoring 4.12.0 True False False 29m network 4.12.0 True False False 38m node-tuning 4.12.0 True False False 37m openshift-apiserver 4.12.0 True False False 32m openshift-controller-manager 4.12.0 True False False 30m openshift-samples 4.12.0 True False False 32m operator-lifecycle-manager 4.12.0 True False False 37m operator-lifecycle-manager-catalog 4.12.0 True False False 37m operator-lifecycle-manager-packageserver 4.12.0 True False False 32m service-ca 4.12.0 True False False 38m storage 4.12.0 True False False 37m", "oc get pod -n openshift-image-registry -l docker-registry=default", "No resources found in openshift-image-registry namespace", "oc edit configs.imageregistry.operator.openshift.io", "storage: pvc: claim:", "oc get clusteroperator image-registry", "NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE MESSAGE image-registry 4.12 True False False 6h50m", "oc edit configs.imageregistry/cluster", "managementState: Removed", "managementState: Managed", "oc patch configs.imageregistry.operator.openshift.io cluster --type merge --patch '{\"spec\":{\"storage\":{\"emptyDir\":{}}}}'", "Error from server (NotFound): configs.imageregistry.operator.openshift.io \"cluster\" not found", "watch -n5 oc get clusteroperators", "NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE authentication 4.12.0 True False False 19m baremetal 4.12.0 True False False 37m cloud-credential 4.12.0 True False False 40m cluster-autoscaler 4.12.0 True False False 37m config-operator 4.12.0 True False False 38m console 4.12.0 True False False 26m csi-snapshot-controller 4.12.0 True False False 37m dns 4.12.0 True False False 37m etcd 4.12.0 True False False 36m image-registry 4.12.0 True False False 31m ingress 4.12.0 True False False 30m insights 4.12.0 True False False 31m kube-apiserver 4.12.0 True False False 26m kube-controller-manager 4.12.0 True False False 36m kube-scheduler 4.12.0 True False False 36m kube-storage-version-migrator 4.12.0 True False False 37m machine-api 4.12.0 True False False 29m machine-approver 4.12.0 True False False 37m machine-config 4.12.0 True False False 36m marketplace 4.12.0 True False False 37m monitoring 4.12.0 True False False 29m network 4.12.0 True False False 38m node-tuning 4.12.0 True False False 37m openshift-apiserver 4.12.0 True False False 32m openshift-controller-manager 4.12.0 True False False 30m openshift-samples 4.12.0 True False False 32m operator-lifecycle-manager 4.12.0 True False False 37m operator-lifecycle-manager-catalog 4.12.0 True False False 37m 
operator-lifecycle-manager-packageserver 4.12.0 True False False 32m service-ca 4.12.0 True False False 38m storage 4.12.0 True False False 37m", "./openshift-install --dir <installation_directory> wait-for install-complete 1", "INFO Waiting up to 30m0s for the cluster to initialize", "oc get pods --all-namespaces", "NAMESPACE NAME READY STATUS RESTARTS AGE openshift-apiserver-operator openshift-apiserver-operator-85cb746d55-zqhs8 1/1 Running 1 9m openshift-apiserver apiserver-67b9g 1/1 Running 0 3m openshift-apiserver apiserver-ljcmx 1/1 Running 0 1m openshift-apiserver apiserver-z25h4 1/1 Running 0 2m openshift-authentication-operator authentication-operator-69d5d8bf84-vh2n8 1/1 Running 0 5m", "oc logs <pod_name> -n <namespace> 1", "USDTTL 1W @ IN SOA ns1.example.com. root ( 2019070700 ; serial 3H ; refresh (3 hours) 30M ; retry (30 minutes) 2W ; expiry (2 weeks) 1W ) ; minimum (1 week) IN NS ns1.example.com. IN MX 10 smtp.example.com. ; ; ns1.example.com. IN A 192.168.1.5 smtp.example.com. IN A 192.168.1.5 ; helper.example.com. IN A 192.168.1.5 helper.ocp4.example.com. IN A 192.168.1.5 ; api.ocp4.example.com. IN A 192.168.1.5 1 api-int.ocp4.example.com. IN A 192.168.1.5 2 ; *.apps.ocp4.example.com. IN A 192.168.1.5 3 ; bootstrap.ocp4.example.com. IN A 192.168.1.96 4 ; control-plane0.ocp4.example.com. IN A 192.168.1.97 5 control-plane1.ocp4.example.com. IN A 192.168.1.98 6 control-plane2.ocp4.example.com. IN A 192.168.1.99 7 ; compute0.ocp4.example.com. IN A 192.168.1.11 8 compute1.ocp4.example.com. IN A 192.168.1.7 9 ; ;EOF", "USDTTL 1W @ IN SOA ns1.example.com. root ( 2019070700 ; serial 3H ; refresh (3 hours) 30M ; retry (30 minutes) 2W ; expiry (2 weeks) 1W ) ; minimum (1 week) IN NS ns1.example.com. ; 5.1.168.192.in-addr.arpa. IN PTR api.ocp4.example.com. 1 5.1.168.192.in-addr.arpa. IN PTR api-int.ocp4.example.com. 2 ; 96.1.168.192.in-addr.arpa. IN PTR bootstrap.ocp4.example.com. 3 ; 97.1.168.192.in-addr.arpa. IN PTR control-plane0.ocp4.example.com. 4 98.1.168.192.in-addr.arpa. IN PTR control-plane1.ocp4.example.com. 5 99.1.168.192.in-addr.arpa. IN PTR control-plane2.ocp4.example.com. 6 ; 11.1.168.192.in-addr.arpa. IN PTR compute0.ocp4.example.com. 7 7.1.168.192.in-addr.arpa. IN PTR compute1.ocp4.example.com. 
8 ; ;EOF", "global log 127.0.0.1 local2 pidfile /var/run/haproxy.pid maxconn 4000 daemon defaults mode http log global option dontlognull option http-server-close option redispatch retries 3 timeout http-request 10s timeout queue 1m timeout connect 10s timeout client 1m timeout server 1m timeout http-keep-alive 10s timeout check 10s maxconn 3000 listen api-server-6443 1 bind *:6443 mode tcp option httpchk GET /readyz HTTP/1.0 option log-health-checks balance roundrobin server bootstrap bootstrap.ocp4.example.com:6443 verify none check check-ssl inter 10s fall 2 rise 3 backup 2 server master0 master0.ocp4.example.com:6443 weight 1 verify none check check-ssl inter 10s fall 2 rise 3 server master1 master1.ocp4.example.com:6443 weight 1 verify none check check-ssl inter 10s fall 2 rise 3 server master2 master2.ocp4.example.com:6443 weight 1 verify none check check-ssl inter 10s fall 2 rise 3 listen machine-config-server-22623 3 bind *:22623 mode tcp server bootstrap bootstrap.ocp4.example.com:22623 check inter 1s backup 4 server master0 master0.ocp4.example.com:22623 check inter 1s server master1 master1.ocp4.example.com:22623 check inter 1s server master2 master2.ocp4.example.com:22623 check inter 1s listen ingress-router-443 5 bind *:443 mode tcp balance source server worker0 worker0.ocp4.example.com:443 check inter 1s server worker1 worker1.ocp4.example.com:443 check inter 1s listen ingress-router-80 6 bind *:80 mode tcp balance source server worker0 worker0.ocp4.example.com:80 check inter 1s server worker1 worker1.ocp4.example.com:80 check inter 1s", "dig +noall +answer @<nameserver_ip> api.<cluster_name>.<base_domain> 1", "api.ocp4.example.com. 604800 IN A 192.168.1.5", "dig +noall +answer @<nameserver_ip> api-int.<cluster_name>.<base_domain>", "api-int.ocp4.example.com. 604800 IN A 192.168.1.5", "dig +noall +answer @<nameserver_ip> random.apps.<cluster_name>.<base_domain>", "random.apps.ocp4.example.com. 604800 IN A 192.168.1.5", "dig +noall +answer @<nameserver_ip> console-openshift-console.apps.<cluster_name>.<base_domain>", "console-openshift-console.apps.ocp4.example.com. 604800 IN A 192.168.1.5", "dig +noall +answer @<nameserver_ip> bootstrap.<cluster_name>.<base_domain>", "bootstrap.ocp4.example.com. 604800 IN A 192.168.1.96", "dig +noall +answer @<nameserver_ip> -x 192.168.1.5", "5.1.168.192.in-addr.arpa. 604800 IN PTR api-int.ocp4.example.com. 1 5.1.168.192.in-addr.arpa. 604800 IN PTR api.ocp4.example.com. 2", "dig +noall +answer @<nameserver_ip> -x 192.168.1.96", "96.1.168.192.in-addr.arpa. 
604800 IN PTR bootstrap.ocp4.example.com.", "ssh-keygen -t ed25519 -N '' -f <path>/<file_name> 1", "cat <path>/<file_name>.pub", "cat ~/.ssh/id_ed25519.pub", "eval \"USD(ssh-agent -s)\"", "Agent pid 31874", "ssh-add <path>/<file_name> 1", "Identity added: /home/<you>/<path>/<file_name> (<computer_name>)", "mkdir <installation_directory>", "{ \"auths\":{ \"cloud.openshift.com\":{ \"auth\":\"b3Blb=\", \"email\":\"[email protected]\" }, \"quay.io\":{ \"auth\":\"b3Blb=\", \"email\":\"[email protected]\" } } }", "networking: clusterNetwork: - cidr: 10.128.0.0/14 hostPrefix: 23 - cidr: fd00:10:128::/56 hostPrefix: 64 serviceNetwork: - 172.30.0.0/16 - fd00:172:16::/112", "networking: clusterNetwork: - cidr: 10.128.0.0/14 hostPrefix: 23", "networking: serviceNetwork: - 172.30.0.0/16", "networking: machineNetwork: - cidr: 10.0.0.0/16", "apiVersion: v1 baseDomain: example.com 1 compute: 2 - hyperthreading: Enabled 3 name: worker replicas: 0 4 architecture : ppc64le controlPlane: 5 hyperthreading: Enabled 6 name: master replicas: 3 7 architecture: ppc64le metadata: name: test 8 networking: clusterNetwork: - cidr: 10.128.0.0/14 9 hostPrefix: 23 10 networkType: OVNKubernetes 11 serviceNetwork: 12 - 172.30.0.0/16 platform: none: {} 13 fips: false 14 pullSecret: '{\"auths\":{\"<local_registry>\": {\"auth\": \"<credentials>\",\"email\": \"[email protected]\"}}}' 15 sshKey: 'ssh-ed25519 AAAA...' 16 additionalTrustBundle: | 17 -----BEGIN CERTIFICATE----- ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ -----END CERTIFICATE----- imageContentSources: 18 - mirrors: - <local_registry>/<local_repository_name>/release source: quay.io/openshift-release-dev/ocp-release - mirrors: - <local_registry>/<local_repository_name>/release source: quay.io/openshift-release-dev/ocp-v4.0-art-dev", "apiVersion: v1 baseDomain: my.domain.com proxy: httpProxy: http://<username>:<pswd>@<ip>:<port> 1 httpsProxy: https://<username>:<pswd>@<ip>:<port> 2 noProxy: example.com 3 additionalTrustBundle: | 4 -----BEGIN CERTIFICATE----- <MY_TRUSTED_CA_CERT> -----END CERTIFICATE----- additionalTrustBundlePolicy: <policy_to_add_additionalTrustBundle> 5", "./openshift-install wait-for install-complete --log-level debug", "compute: - name: worker platform: {} replicas: 0", "spec: clusterNetwork: - cidr: 10.128.0.0/19 hostPrefix: 23 - cidr: 10.128.32.0/19 hostPrefix: 23", "spec: serviceNetwork: - 172.30.0.0/14", "defaultNetwork: type: OpenShiftSDN openshiftSDNConfig: mode: NetworkPolicy mtu: 1450 vxlanPort: 4789", "defaultNetwork: type: OVNKubernetes ovnKubernetesConfig: mtu: 1400 genevePort: 6081 ipsecConfig: {}", "kubeProxyConfig: proxyArguments: iptables-min-sync-period: - 0s", "./openshift-install create manifests --dir <installation_directory> 1", "./openshift-install create ignition-configs --dir <installation_directory> 1", ". 
├── auth │ ├── kubeadmin-password │ └── kubeconfig ├── bootstrap.ign ├── master.ign ├── metadata.json └── worker.ign", "sha512sum <installation_directory>/bootstrap.ign", "curl -k http://<HTTP_server>/bootstrap.ign 1", "% Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0{\"ignition\":{\"version\":\"3.2.0\"},\"passwd\":{\"users\":[{\"name\":\"core\",\"sshAuthorizedKeys\":[\"ssh-rsa", "openshift-install coreos print-stream-json | grep '\\.iso[^.]'", "\"location\": \"<url>/art/storage/releases/rhcos-4.12-aarch64/<release>/aarch64/rhcos-<release>-live.aarch64.iso\", \"location\": \"<url>/art/storage/releases/rhcos-4.12-ppc64le/<release>/ppc64le/rhcos-<release>-live.ppc64le.iso\", \"location\": \"<url>/art/storage/releases/rhcos-4.12-s390x/<release>/s390x/rhcos-<release>-live.s390x.iso\", \"location\": \"<url>/art/storage/releases/rhcos-4.12/<release>/x86_64/rhcos-<release>-live.x86_64.iso\",", "sudo coreos-installer install --ignition-url=http://<HTTP_server>/<node_type>.ign <device> --ignition-hash=sha512-<digest> 1 2", "sudo coreos-installer install --ignition-url=http://192.168.1.2:80/installation_directory/bootstrap.ign /dev/sda --ignition-hash=sha512-a5a2d43879223273c9b60af66b44202a1d1248fc01cf156c46d4a79f552b6bad47bc8cc78ddf0116e80c59d2ea9e32ba53bc807afbca581aa059311def2c3e3b", "Ignition: ran on 2022/03/14 14:48:33 UTC (this boot) Ignition: user-provided config was applied", "ip=10.10.10.2::10.10.10.254:255.255.255.0:core0.example.com:enp1s0:none nameserver=4.4.4.41", "ip=10.10.10.2::10.10.10.254:255.255.255.0::enp1s0:none nameserver=4.4.4.41", "ip=10.10.10.2::10.10.10.254:255.255.255.0:core0.example.com:enp1s0:none ip=10.10.10.3::10.10.10.254:255.255.255.0:core0.example.com:enp2s0:none", "ip=::10.10.10.254::::", "rd.route=20.20.20.0/24:20.20.20.254:enp2s0", "ip=10.10.10.2::10.10.10.254:255.255.255.0:core0.example.com:enp1s0:none ip=::::core0.example.com:enp2s0:none", "ip=enp1s0:dhcp ip=10.10.10.2::10.10.10.254:255.255.255.0:core0.example.com:enp2s0:none", "ip=10.10.10.2::10.10.10.254:255.255.255.0:core0.example.com:enp2s0.100:none vlan=enp2s0.100:enp2s0", "ip=enp2s0.100:dhcp vlan=enp2s0.100:enp2s0", "nameserver=1.1.1.1 nameserver=8.8.8.8", "bond=bond0:em1,em2:mode=active-backup ip=bond0:dhcp", "bond=bond0:em1,em2:mode=active-backup ip=10.10.10.2::10.10.10.254:255.255.255.0:core0.example.com:bond0:none", "ip=bond0.100:dhcp bond=bond0:em1,em2:mode=active-backup vlan=bond0.100:bond0", "ip=10.10.10.2::10.10.10.254:255.255.255.0:core0.example.com:bond0.100:none bond=bond0:em1,em2:mode=active-backup vlan=bond0.100:bond0", "team=team0:em1,em2 ip=team0:dhcp", "curl -k http://<HTTP_server>/bootstrap.ign 1", "% Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0{\"ignition\":{\"version\":\"3.2.0\"},\"passwd\":{\"users\":[{\"name\":\"core\",\"sshAuthorizedKeys\":[\"ssh-rsa", "openshift-install coreos print-stream-json | grep -Eo '\"https.*(kernel-|initramfs.|rootfs.)\\w+(\\.img)?\"'", "\"<url>/art/storage/releases/rhcos-4.12-aarch64/<release>/aarch64/rhcos-<release>-live-kernel-aarch64\" \"<url>/art/storage/releases/rhcos-4.12-aarch64/<release>/aarch64/rhcos-<release>-live-initramfs.aarch64.img\" \"<url>/art/storage/releases/rhcos-4.12-aarch64/<release>/aarch64/rhcos-<release>-live-rootfs.aarch64.img\" \"<url>/art/storage/releases/rhcos-4.12-ppc64le/49.84.202110081256-0/ppc64le/rhcos-<release>-live-kernel-ppc64le\" 
\"<url>/art/storage/releases/rhcos-4.12-ppc64le/<release>/ppc64le/rhcos-<release>-live-initramfs.ppc64le.img\" \"<url>/art/storage/releases/rhcos-4.12-ppc64le/<release>/ppc64le/rhcos-<release>-live-rootfs.ppc64le.img\" \"<url>/art/storage/releases/rhcos-4.12-s390x/<release>/s390x/rhcos-<release>-live-kernel-s390x\" \"<url>/art/storage/releases/rhcos-4.12-s390x/<release>/s390x/rhcos-<release>-live-initramfs.s390x.img\" \"<url>/art/storage/releases/rhcos-4.12-s390x/<release>/s390x/rhcos-<release>-live-rootfs.s390x.img\" \"<url>/art/storage/releases/rhcos-4.12/<release>/x86_64/rhcos-<release>-live-kernel-x86_64\" \"<url>/art/storage/releases/rhcos-4.12/<release>/x86_64/rhcos-<release>-live-initramfs.x86_64.img\" \"<url>/art/storage/releases/rhcos-4.12/<release>/x86_64/rhcos-<release>-live-rootfs.x86_64.img\"", "DEFAULT pxeboot TIMEOUT 20 PROMPT 0 LABEL pxeboot KERNEL http://<HTTP_server>/rhcos-<version>-live-kernel-<architecture> 1 APPEND initrd=http://<HTTP_server>/rhcos-<version>-live-initramfs.<architecture>.img coreos.live.rootfs_url=http://<HTTP_server>/rhcos-<version>-live-rootfs.<architecture>.img coreos.inst.install_dev=/dev/sda coreos.inst.ignition_url=http://<HTTP_server>/bootstrap.ign 2 3", "Ignition: ran on 2022/03/14 14:48:33 UTC (this boot) Ignition: user-provided config was applied", "./openshift-install create manifests --dir <installation_directory>", "apiVersion: machineconfiguration.openshift.io/v1 kind: MachineConfig metadata: labels: machineconfiguration.openshift.io/role: \"master\" name: 99-master-kargs-mpath spec: kernelArguments: - 'rd.multipath=default' - 'root=/dev/disk/by-label/dm-mpath-root'", "apiVersion: machineconfiguration.openshift.io/v1 kind: MachineConfig metadata: labels: machineconfiguration.openshift.io/role: \"worker\" name: 99-worker-kargs-mpath spec: kernelArguments: - 'rd.multipath=default' - 'root=/dev/disk/by-label/dm-mpath-root'", "bootlist -m normal -o sda", "bootlist -m normal -o /dev/sdc /dev/sdd /dev/sde sdc sdd sde", "./openshift-install --dir <installation_directory> wait-for bootstrap-complete \\ 1 --log-level=info 2", "INFO Waiting up to 30m0s for the Kubernetes API at https://api.test.example.com:6443 INFO API v1.25.0 up INFO Waiting up to 30m0s for bootstrapping to complete INFO It is now safe to remove the bootstrap resources", "export KUBECONFIG=<installation_directory>/auth/kubeconfig 1", "oc whoami", "system:admin", "oc get nodes", "NAME STATUS ROLES AGE VERSION master-0 Ready master 63m v1.25.0 master-1 Ready master 63m v1.25.0 master-2 Ready master 64m v1.25.0", "oc get csr", "NAME AGE REQUESTOR CONDITION csr-8b2br 15m system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Pending csr-8vnps 15m system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Pending", "oc adm certificate approve <csr_name> 1", "oc get csr -o go-template='{{range .items}}{{if not .status}}{{.metadata.name}}{{\"\\n\"}}{{end}}{{end}}' | xargs --no-run-if-empty oc adm certificate approve", "oc get csr", "NAME AGE REQUESTOR CONDITION csr-bfd72 5m26s system:node:ip-10-0-50-126.us-east-2.compute.internal Pending csr-c57lv 5m26s system:node:ip-10-0-95-157.us-east-2.compute.internal Pending", "oc adm certificate approve <csr_name> 1", "oc get csr -o go-template='{{range .items}}{{if not .status}}{{.metadata.name}}{{\"\\n\"}}{{end}}{{end}}' | xargs oc adm certificate approve", "oc get nodes", "NAME STATUS ROLES AGE VERSION master-0 Ready master 73m v1.25.0 master-1 Ready master 73m v1.25.0 master-2 Ready master 74m v1.25.0 
worker-0 Ready worker 11m v1.25.0 worker-1 Ready worker 11m v1.25.0", "watch -n5 oc get clusteroperators", "NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE authentication 4.12.0 True False False 19m baremetal 4.12.0 True False False 37m cloud-credential 4.12.0 True False False 40m cluster-autoscaler 4.12.0 True False False 37m config-operator 4.12.0 True False False 38m console 4.12.0 True False False 26m csi-snapshot-controller 4.12.0 True False False 37m dns 4.12.0 True False False 37m etcd 4.12.0 True False False 36m image-registry 4.12.0 True False False 31m ingress 4.12.0 True False False 30m insights 4.12.0 True False False 31m kube-apiserver 4.12.0 True False False 26m kube-controller-manager 4.12.0 True False False 36m kube-scheduler 4.12.0 True False False 36m kube-storage-version-migrator 4.12.0 True False False 37m machine-api 4.12.0 True False False 29m machine-approver 4.12.0 True False False 37m machine-config 4.12.0 True False False 36m marketplace 4.12.0 True False False 37m monitoring 4.12.0 True False False 29m network 4.12.0 True False False 38m node-tuning 4.12.0 True False False 37m openshift-apiserver 4.12.0 True False False 32m openshift-controller-manager 4.12.0 True False False 30m openshift-samples 4.12.0 True False False 32m operator-lifecycle-manager 4.12.0 True False False 37m operator-lifecycle-manager-catalog 4.12.0 True False False 37m operator-lifecycle-manager-packageserver 4.12.0 True False False 32m service-ca 4.12.0 True False False 38m storage 4.12.0 True False False 37m", "oc patch OperatorHub cluster --type json -p '[{\"op\": \"add\", \"path\": \"/spec/disableAllDefaultSources\", \"value\": true}]'", "oc patch configs.imageregistry.operator.openshift.io cluster --type merge --patch '{\"spec\":{\"managementState\":\"Managed\"}}'", "oc get pod -n openshift-image-registry -l docker-registry=default", "No resources found in openshift-image-registry namespace", "oc edit configs.imageregistry.operator.openshift.io", "storage: pvc: claim:", "oc get clusteroperator image-registry", "NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE MESSAGE image-registry 4.12 True False False 6h50m", "oc edit configs.imageregistry/cluster", "managementState: Removed", "managementState: Managed", "oc patch configs.imageregistry.operator.openshift.io cluster --type merge --patch '{\"spec\":{\"storage\":{\"emptyDir\":{}}}}'", "Error from server (NotFound): configs.imageregistry.operator.openshift.io \"cluster\" not found", "watch -n5 oc get clusteroperators", "NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE authentication 4.12.0 True False False 19m baremetal 4.12.0 True False False 37m cloud-credential 4.12.0 True False False 40m cluster-autoscaler 4.12.0 True False False 37m config-operator 4.12.0 True False False 38m console 4.12.0 True False False 26m csi-snapshot-controller 4.12.0 True False False 37m dns 4.12.0 True False False 37m etcd 4.12.0 True False False 36m image-registry 4.12.0 True False False 31m ingress 4.12.0 True False False 30m insights 4.12.0 True False False 31m kube-apiserver 4.12.0 True False False 26m kube-controller-manager 4.12.0 True False False 36m kube-scheduler 4.12.0 True False False 36m kube-storage-version-migrator 4.12.0 True False False 37m machine-api 4.12.0 True False False 29m machine-approver 4.12.0 True False False 37m machine-config 4.12.0 True False False 36m marketplace 4.12.0 True False False 37m monitoring 4.12.0 True False False 29m network 4.12.0 True False False 38m node-tuning 4.12.0 True False False 37m openshift-apiserver 
4.12.0 True False False 32m openshift-controller-manager 4.12.0 True False False 30m openshift-samples 4.12.0 True False False 32m operator-lifecycle-manager 4.12.0 True False False 37m operator-lifecycle-manager-catalog 4.12.0 True False False 37m operator-lifecycle-manager-packageserver 4.12.0 True False False 32m service-ca 4.12.0 True False False 38m storage 4.12.0 True False False 37m", "./openshift-install --dir <installation_directory> wait-for install-complete 1", "INFO Waiting up to 30m0s for the cluster to initialize", "oc get pods --all-namespaces", "NAMESPACE NAME READY STATUS RESTARTS AGE openshift-apiserver-operator openshift-apiserver-operator-85cb746d55-zqhs8 1/1 Running 1 9m openshift-apiserver apiserver-67b9g 1/1 Running 0 3m openshift-apiserver apiserver-ljcmx 1/1 Running 0 1m openshift-apiserver apiserver-z25h4 1/1 Running 0 2m openshift-authentication-operator authentication-operator-69d5d8bf84-vh2n8 1/1 Running 0 5m", "oc logs <pod_name> -n <namespace> 1" ]
https://docs.redhat.com/en/documentation/openshift_container_platform/4.12/html-single/installing_on_ibm_power/index
Builds using BuildConfig
Builds using BuildConfig OpenShift Container Platform 4.15 Builds Red Hat OpenShift Documentation Team
[ "kind: BuildConfig apiVersion: build.openshift.io/v1 metadata: name: \"ruby-sample-build\" 1 spec: runPolicy: \"Serial\" 2 triggers: 3 - type: \"GitHub\" github: secret: \"secret101\" - type: \"Generic\" generic: secret: \"secret101\" - type: \"ImageChange\" source: 4 git: uri: \"https://github.com/openshift/ruby-hello-world\" strategy: 5 sourceStrategy: from: kind: \"ImageStreamTag\" name: \"ruby-20-centos7:latest\" output: 6 to: kind: \"ImageStreamTag\" name: \"origin-ruby-sample:latest\" postCommit: 7 script: \"bundle exec rake test\"", "source: git: uri: https://github.com/openshift/ruby-hello-world.git 1 ref: \"master\" images: - from: kind: ImageStreamTag name: myinputimage:latest namespace: mynamespace paths: - destinationDir: app/dir/injected/dir 2 sourcePath: /usr/lib/somefile.jar contextDir: \"app/dir\" 3 dockerfile: \"FROM centos:7\\nRUN yum install -y httpd\" 4", "source: dockerfile: \"FROM centos:7\\nRUN yum install -y httpd\" 1", "source: git: uri: https://github.com/openshift/ruby-hello-world.git ref: \"master\" images: 1 - from: 2 kind: ImageStreamTag name: myinputimage:latest namespace: mynamespace paths: 3 - destinationDir: injected/dir 4 sourcePath: /usr/lib/somefile.jar 5 - from: kind: ImageStreamTag name: myotherinputimage:latest namespace: myothernamespace pullSecret: mysecret 6 paths: - destinationDir: injected/dir sourcePath: /usr/lib/somefile.jar", "oc secrets link builder dockerhub", "source: git: 1 uri: \"https://github.com/openshift/ruby-hello-world\" ref: \"master\" contextDir: \"app/dir\" 2 dockerfile: \"FROM openshift/ruby-22-centos7\\nUSER example\" 3", "source: git: uri: \"https://github.com/openshift/ruby-hello-world\" ref: \"master\" httpProxy: http://proxy.example.com httpsProxy: https://proxy.example.com noProxy: somedomain.com, otherdomain.com", "oc annotate secret mysecret 'build.openshift.io/source-secret-match-uri-1=ssh://bitbucket.atlassian.com:7999/*'", "kind: Secret apiVersion: v1 metadata: name: matches-all-corporate-servers-https-only annotations: build.openshift.io/source-secret-match-uri-1: https://*.mycorp.com/* data: --- kind: Secret apiVersion: v1 metadata: name: override-for-my-dev-servers-https-only annotations: build.openshift.io/source-secret-match-uri-1: https://mydev1.mycorp.com/* build.openshift.io/source-secret-match-uri-2: https://mydev2.mycorp.com/* data:", "oc annotate secret mysecret 'build.openshift.io/source-secret-match-uri-1=https://*.mycorp.com/*'", "apiVersion: \"build.openshift.io/v1\" kind: \"BuildConfig\" metadata: name: \"sample-build\" spec: output: to: kind: \"ImageStreamTag\" name: \"sample-image:latest\" source: git: uri: \"https://github.com/user/app.git\" sourceSecret: name: \"basicsecret\" strategy: sourceStrategy: from: kind: \"ImageStreamTag\" name: \"python-33-centos7:latest\"", "oc set build-secret --source bc/sample-build basicsecret", "oc create secret generic <secret_name> --from-file=<path/to/.gitconfig>", "[http] sslVerify=false", "cat .gitconfig", "[user] name = <name> email = <email> [http] sslVerify = false sslCert = /var/run/secrets/openshift.io/source/client.crt sslKey = /var/run/secrets/openshift.io/source/client.key sslCaInfo = /var/run/secrets/openshift.io/source/cacert.crt", "oc create secret generic <secret_name> --from-literal=username=<user_name> \\ 1 --from-literal=password=<password> \\ 2 --from-file=.gitconfig=.gitconfig --from-file=client.crt=/var/run/secrets/openshift.io/source/client.crt --from-file=cacert.crt=/var/run/secrets/openshift.io/source/cacert.crt 
--from-file=client.key=/var/run/secrets/openshift.io/source/client.key", "oc create secret generic <secret_name> --from-literal=username=<user_name> --from-literal=password=<password> --type=kubernetes.io/basic-auth", "oc create secret generic <secret_name> --from-literal=password=<token> --type=kubernetes.io/basic-auth", "ssh-keygen -t ed25519 -C \"[email protected]\"", "oc create secret generic <secret_name> --from-file=ssh-privatekey=<path/to/ssh/private/key> --from-file=<path/to/known_hosts> \\ 1 --type=kubernetes.io/ssh-auth", "cat intermediateCA.crt intermediateCA.crt rootCA.crt > ca.crt", "oc create secret generic mycert --from-file=ca.crt=</path/to/file> 1", "oc create secret generic <secret_name> --from-file=ssh-privatekey=<path/to/ssh/private/key> --from-file=<path/to/.gitconfig> --type=kubernetes.io/ssh-auth", "oc create secret generic <secret_name> --from-file=ca.crt=<path/to/certificate> --from-file=<path/to/.gitconfig>", "oc create secret generic <secret_name> --from-literal=username=<user_name> --from-literal=password=<password> --from-file=ca-cert=</path/to/file> --type=kubernetes.io/basic-auth", "oc create secret generic <secret_name> --from-literal=username=<user_name> --from-literal=password=<password> --from-file=</path/to/.gitconfig> --type=kubernetes.io/basic-auth", "oc create secret generic <secret_name> --from-literal=username=<user_name> --from-literal=password=<password> --from-file=</path/to/.gitconfig> --from-file=ca-cert=</path/to/file> --type=kubernetes.io/basic-auth", "apiVersion: v1 kind: Secret metadata: name: test-secret namespace: my-namespace type: Opaque 1 data: 2 username: <username> 3 password: <password> stringData: 4 hostname: myapp.mydomain.com 5", "oc create -f <filename>", "oc create secret generic dockerhub --from-file=.dockerconfigjson=<path/to/.docker/config.json> --type=kubernetes.io/dockerconfigjson", "apiVersion: v1 kind: Secret metadata: name: mysecret type: Opaque 1 data: username: <username> password: <password>", "apiVersion: v1 kind: Secret metadata: name: aregistrykey namespace: myapps type: kubernetes.io/dockerconfigjson 1 data: .dockerconfigjson:bm5ubm5ubm5ubm5ubm5ubm5ubm5ubmdnZ2dnZ2dnZ2dnZ2dnZ2dnZ2cgYXV0aCBrZXlzCg== 2", "oc create -f <your_yaml_file>.yaml", "oc logs secret-example-pod", "oc delete pod secret-example-pod", "apiVersion: v1 kind: Secret metadata: name: test-secret data: username: <username> 1 password: <password> 2 stringData: hostname: myapp.mydomain.com 3 secret.properties: |- 4 property1=valueA property2=valueB", "apiVersion: v1 kind: Pod metadata: name: secret-example-pod spec: containers: - name: secret-test-container image: busybox command: [ \"/bin/sh\", \"-c\", \"cat /etc/secret-volume/*\" ] volumeMounts: # name must match the volume name below - name: secret-volume mountPath: /etc/secret-volume readOnly: true volumes: - name: secret-volume secret: secretName: test-secret restartPolicy: Never", "apiVersion: v1 kind: Pod metadata: name: secret-example-pod spec: containers: - name: secret-test-container image: busybox command: [ \"/bin/sh\", \"-c\", \"export\" ] env: - name: TEST_SECRET_USERNAME_ENV_VAR valueFrom: secretKeyRef: name: test-secret key: username restartPolicy: Never", "apiVersion: build.openshift.io/v1 kind: BuildConfig metadata: name: secret-example-bc spec: strategy: sourceStrategy: env: - name: TEST_SECRET_USERNAME_ENV_VAR valueFrom: secretKeyRef: name: test-secret key: username", "oc create configmap settings-mvn --from-file=settings.xml=<path/to/settings.xml>", "apiVersion: core/v1 kind: 
ConfigMap metadata: name: settings-mvn data: settings.xml: | <settings> ... # Insert maven settings here </settings>", "oc create secret generic secret-mvn --from-file=ssh-privatekey=<path/to/.ssh/id_rsa> --type=kubernetes.io/ssh-auth", "apiVersion: core/v1 kind: Secret metadata: name: secret-mvn type: kubernetes.io/ssh-auth data: ssh-privatekey: | # Insert ssh private key, base64 encoded", "source: git: uri: https://github.com/wildfly/quickstart.git contextDir: helloworld configMaps: - configMap: name: settings-mvn secrets: - secret: name: secret-mvn", "oc new-build openshift/wildfly-101-centos7~https://github.com/wildfly/quickstart.git --context-dir helloworld --build-secret \"secret-mvn\" --build-config-map \"settings-mvn\"", "source: git: uri: https://github.com/wildfly/quickstart.git contextDir: helloworld configMaps: - configMap: name: settings-mvn destinationDir: \".m2\" secrets: - secret: name: secret-mvn destinationDir: \".ssh\"", "oc new-build openshift/wildfly-101-centos7~https://github.com/wildfly/quickstart.git --context-dir helloworld --build-secret \"secret-mvn:.ssh\" --build-config-map \"settings-mvn:.m2\"", "FROM centos/ruby-22-centos7 USER root COPY ./secret-dir /secrets COPY ./config / Create a shell script that will output secrets and ConfigMaps when the image is run RUN echo '#!/bin/sh' > /input_report.sh RUN echo '(test -f /secrets/secret1 && echo -n \"secret1=\" && cat /secrets/secret1)' >> /input_report.sh RUN echo '(test -f /config && echo -n \"relative-configMap=\" && cat /config)' >> /input_report.sh RUN chmod 755 /input_report.sh CMD [\"/bin/sh\", \"-c\", \"/input_report.sh\"]", "#!/bin/sh APP_VERSION=1.0 wget http://repository.example.com/app/app-USDAPP_VERSION.jar -O app.jar", "#!/bin/sh exec java -jar app.jar", "FROM jboss/base-jdk:8 ENV APP_VERSION 1.0 RUN wget http://repository.example.com/app/app-USDAPP_VERSION.jar -O app.jar EXPOSE 8080 CMD [ \"java\", \"-jar\", \"app.jar\" ]", "auths: index.docker.io/v1/: 1 auth: \"YWRfbGzhcGU6R2labnRib21ifTE=\" 2 email: \"[email protected]\" 3 docker.io/my-namespace/my-user/my-image: 4 auth: \"GzhYWRGU6R2fbclabnRgbkSp=\"\" email: \"[email protected]\" docker.io/my-namespace: 5 auth: \"GzhYWRGU6R2deesfrRgbkSp=\"\" email: \"[email protected]\"", "oc create secret generic dockerhub --from-file=.dockerconfigjson=<path/to/.docker/config.json> --type=kubernetes.io/dockerconfigjson", "spec: output: to: kind: \"DockerImage\" name: \"private.registry.com/org/private-image:latest\" pushSecret: name: \"dockerhub\"", "oc set build-secret --push bc/sample-build dockerhub", "oc secrets link builder dockerhub", "strategy: sourceStrategy: from: kind: \"DockerImage\" name: \"docker.io/user/private_repository\" pullSecret: name: \"dockerhub\"", "oc set build-secret --pull bc/sample-build dockerhub", "oc secrets link builder dockerhub", "env: - name: FIELDREF_ENV valueFrom: fieldRef: fieldPath: metadata.name", "apiVersion: build.openshift.io/v1 kind: BuildConfig metadata: name: secret-example-bc spec: strategy: sourceStrategy: env: - name: MYVAL valueFrom: secretKeyRef: key: myval name: mysecret", "spec: output: to: kind: \"ImageStreamTag\" name: \"sample-image:latest\"", "spec: output: to: kind: \"DockerImage\" name: \"my-registry.mycompany.com:5000/myimages/myimage:tag\"", "spec: output: to: kind: \"ImageStreamTag\" name: \"my-image:latest\" imageLabels: - name: \"vendor\" value: \"MyCompany\" - name: \"authoritative-source-url\" value: \"registry.mycompany.com\"", "strategy: dockerStrategy: from: kind: \"ImageStreamTag\" name: 
\"debian:latest\"", "strategy: dockerStrategy: dockerfilePath: dockerfiles/app1/Dockerfile", "dockerStrategy: env: - name: \"HTTP_PROXY\" value: \"http://myproxy.net:5187/\"", "dockerStrategy: buildArgs: - name: \"version\" value: \"latest\"", "strategy: dockerStrategy: imageOptimizationPolicy: SkipLayers", "spec: dockerStrategy: volumes: - name: secret-mvn 1 mounts: - destinationPath: /opt/app-root/src/.ssh 2 source: type: Secret 3 secret: secretName: my-secret 4 - name: settings-mvn 5 mounts: - destinationPath: /opt/app-root/src/.m2 6 source: type: ConfigMap 7 configMap: name: my-config 8 - name: my-csi-volume 9 mounts: - destinationPath: /opt/app-root/src/some_path 10 source: type: CSI 11 csi: driver: csi.sharedresource.openshift.io 12 readOnly: true 13 volumeAttributes: 14 attribute: value", "strategy: sourceStrategy: from: kind: \"ImageStreamTag\" name: \"incremental-image:latest\" 1 incremental: true 2", "strategy: sourceStrategy: from: kind: \"ImageStreamTag\" name: \"builder-image:latest\" scripts: \"http://somehost.com/scripts_directory\" 1", "sourceStrategy: env: - name: \"DISABLE_ASSET_COMPILATION\" value: \"true\"", "#!/bin/bash restore build artifacts if [ \"USD(ls /tmp/s2i/artifacts/ 2>/dev/null)\" ]; then mv /tmp/s2i/artifacts/* USDHOME/. fi move the application source mv /tmp/s2i/src USDHOME/src build application artifacts pushd USD{HOME} make all install the artifacts make install popd", "#!/bin/bash run the application /opt/application/run.sh", "#!/bin/bash pushd USD{HOME} if [ -d deps ]; then # all deps contents to tar stream tar cf - deps fi popd", "#!/bin/bash inform the user how to use the image cat <<EOF This is a S2I sample builder image, to use it, install https://github.com/openshift/source-to-image EOF", "spec: sourceStrategy: volumes: - name: secret-mvn 1 mounts: - destinationPath: /opt/app-root/src/.ssh 2 source: type: Secret 3 secret: secretName: my-secret 4 - name: settings-mvn 5 mounts: - destinationPath: /opt/app-root/src/.m2 6 source: type: ConfigMap 7 configMap: name: my-config 8 - name: my-csi-volume 9 mounts: - destinationPath: /opt/app-root/src/some_path 10 source: type: CSI 11 csi: driver: csi.sharedresource.openshift.io 12 readOnly: true 13 volumeAttributes: 14 attribute: value", "strategy: customStrategy: from: kind: \"DockerImage\" name: \"openshift/sti-image-builder\"", "strategy: customStrategy: secrets: - secretSource: 1 name: \"secret1\" mountPath: \"/tmp/secret1\" 2 - secretSource: name: \"secret2\" mountPath: \"/tmp/secret2\"", "customStrategy: env: - name: \"HTTP_PROXY\" value: \"http://myproxy.net:5187/\"", "oc set env <enter_variables>", "kind: \"BuildConfig\" apiVersion: \"v1\" metadata: name: \"sample-pipeline\" spec: strategy: jenkinsPipelineStrategy: jenkinsfile: |- node('agent') { stage 'build' openshiftBuild(buildConfig: 'ruby-sample-build', showBuildLogs: 'true') stage 'deploy' openshiftDeploy(deploymentConfig: 'frontend') }", "kind: \"BuildConfig\" apiVersion: \"v1\" metadata: name: \"sample-pipeline\" spec: source: git: uri: \"https://github.com/openshift/ruby-hello-world\" strategy: jenkinsPipelineStrategy: jenkinsfilePath: some/repo/dir/filename 1", "jenkinsPipelineStrategy: env: - name: \"FOO\" value: \"BAR\"", "oc project <project_name>", "oc new-app jenkins-ephemeral 1", "kind: \"BuildConfig\" apiVersion: \"v1\" metadata: name: \"nodejs-sample-pipeline\" spec: strategy: jenkinsPipelineStrategy: jenkinsfile: <pipeline content from below> type: JenkinsPipeline", "def templatePath = 
'https://raw.githubusercontent.com/openshift/nodejs-ex/master/openshift/templates/nodejs-mongodb.json' 1 def templateName = 'nodejs-mongodb-example' 2 pipeline { agent { node { label 'nodejs' 3 } } options { timeout(time: 20, unit: 'MINUTES') 4 } stages { stage('preamble') { steps { script { openshift.withCluster() { openshift.withProject() { echo \"Using project: USD{openshift.project()}\" } } } } } stage('cleanup') { steps { script { openshift.withCluster() { openshift.withProject() { openshift.selector(\"all\", [ template : templateName ]).delete() 5 if (openshift.selector(\"secrets\", templateName).exists()) { 6 openshift.selector(\"secrets\", templateName).delete() } } } } } } stage('create') { steps { script { openshift.withCluster() { openshift.withProject() { openshift.newApp(templatePath) 7 } } } } } stage('build') { steps { script { openshift.withCluster() { openshift.withProject() { def builds = openshift.selector(\"bc\", templateName).related('builds') timeout(5) { 8 builds.untilEach(1) { return (it.object().status.phase == \"Complete\") } } } } } } } stage('deploy') { steps { script { openshift.withCluster() { openshift.withProject() { def rm = openshift.selector(\"dc\", templateName).rollout() timeout(5) { 9 openshift.selector(\"dc\", templateName).related('pods').untilEach(1) { return (it.object().status.phase == \"Running\") } } } } } } } stage('tag') { steps { script { openshift.withCluster() { openshift.withProject() { openshift.tag(\"USD{templateName}:latest\", \"USD{templateName}-staging:latest\") 10 } } } } } } }", "oc create -f nodejs-sample-pipeline.yaml", "oc create -f https://raw.githubusercontent.com/openshift/origin/master/examples/jenkins/pipeline/nodejs-sample-pipeline.yaml", "oc start-build nodejs-sample-pipeline", "FROM registry.redhat.io/rhel8/buildah In this example, `/tmp/build` contains the inputs that build when this custom builder image is run. Normally the custom builder image fetches this content from some location at build time, by using git clone as an example. ADD dockerfile.sample /tmp/input/Dockerfile ADD build.sh /usr/bin RUN chmod a+x /usr/bin/build.sh /usr/bin/build.sh contains the actual custom build logic that will be run when this custom builder image is run. ENTRYPOINT [\"/usr/bin/build.sh\"]", "FROM registry.access.redhat.com/ubi9/ubi RUN touch /tmp/build", "#!/bin/sh Note that in this case the build inputs are part of the custom builder image, but normally this is retrieved from an external source. cd /tmp/input OUTPUT_REGISTRY and OUTPUT_IMAGE are env variables provided by the custom build framework TAG=\"USD{OUTPUT_REGISTRY}/USD{OUTPUT_IMAGE}\" performs the build of the new image defined by dockerfile.sample buildah --storage-driver vfs bud --isolation chroot -t USD{TAG} . buildah requires a slight modification to the push secret provided by the service account to use it for pushing the image cp /var/run/secrets/openshift.io/push/.dockercfg /tmp (echo \"{ \\\"auths\\\": \" ; cat /var/run/secrets/openshift.io/push/.dockercfg ; echo \"}\") > /tmp/.dockercfg push the new image to the target for the build buildah --storage-driver vfs push --tls-verify=false --authfile /tmp/.dockercfg USD{TAG}", "oc new-build --binary --strategy=docker --name custom-builder-image", "oc start-build custom-builder-image --from-dir . 
-F", "kind: BuildConfig apiVersion: build.openshift.io/v1 metadata: name: sample-custom-build labels: name: sample-custom-build annotations: template.alpha.openshift.io/wait-for-ready: 'true' spec: strategy: type: Custom customStrategy: forcePull: true from: kind: ImageStreamTag name: custom-builder-image:latest namespace: <yourproject> 1 output: to: kind: ImageStreamTag name: sample-custom:latest", "oc create -f buildconfig.yaml", "kind: ImageStream apiVersion: image.openshift.io/v1 metadata: name: sample-custom spec: {}", "oc create -f imagestream.yaml", "oc start-build sample-custom-build -F", "oc start-build <buildconfig_name>", "oc start-build --from-build=<build_name>", "oc start-build <buildconfig_name> --follow", "oc start-build <buildconfig_name> --env=<key>=<value>", "oc start-build hello-world --from-repo=../hello-world --commit=v2", "oc cancel-build <build_name>", "oc cancel-build <build1_name> <build2_name> <build3_name>", "oc cancel-build bc/<buildconfig_name>", "oc cancel-build bc/<buildconfig_name>", "oc delete bc <BuildConfigName>", "oc delete --cascade=false bc <BuildConfigName>", "oc describe build <build_name>", "oc describe build <build_name>", "oc logs -f bc/<buildconfig_name>", "oc logs --version=<number> bc/<buildconfig_name>", "sourceStrategy: env: - name: \"BUILD_LOGLEVEL\" value: \"2\" 1", "type: \"GitHub\" github: secretReference: name: \"mysecret\"", "- kind: Secret apiVersion: v1 metadata: name: mysecret creationTimestamp: data: WebHookSecretKey: c2VjcmV0dmFsdWUx", "type: \"GitHub\" github: secretReference: name: \"mysecret\"", "https://<openshift_api_host:port>/apis/build.openshift.io/v1/namespaces/<namespace>/buildconfigs/<name>/webhooks/<secret>/github", "oc describe bc/<name_of_your_BuildConfig>", "https://api.starter-us-east-1.openshift.com:443/apis/build.openshift.io/v1/namespaces/<namespace>/buildconfigs/<name>/webhooks/<secret>/github", "curl -H \"X-GitHub-Event: push\" -H \"Content-Type: application/json\" -k -X POST --data-binary @payload.json https://<openshift_api_host:port>/apis/build.openshift.io/v1/namespaces/<namespace>/buildconfigs/<name>/webhooks/<secret>/github", "type: \"GitLab\" gitlab: secretReference: name: \"mysecret\"", "https://<openshift_api_host:port>/apis/build.openshift.io/v1/namespaces/<namespace>/buildconfigs/<name>/webhooks/<secret>/gitlab", "oc describe bc <name>", "curl -H \"X-GitLab-Event: Push Hook\" -H \"Content-Type: application/json\" -k -X POST --data-binary @payload.json https://<openshift_api_host:port>/apis/build.openshift.io/v1/namespaces/<namespace>/buildconfigs/<name>/webhooks/<secret>/gitlab", "type: \"Bitbucket\" bitbucket: secretReference: name: \"mysecret\"", "https://<openshift_api_host:port>/apis/build.openshift.io/v1/namespaces/<namespace>/buildconfigs/<name>/webhooks/<secret>/bitbucket", "oc describe bc <name>", "curl -H \"X-Event-Key: repo:push\" -H \"Content-Type: application/json\" -k -X POST --data-binary @payload.json https://<openshift_api_host:port>/apis/build.openshift.io/v1/namespaces/<namespace>/buildconfigs/<name>/webhooks/<secret>/bitbucket", "type: \"Generic\" generic: secretReference: name: \"mysecret\" allowEnv: true 1", "https://<openshift_api_host:port>/apis/build.openshift.io/v1/namespaces/<namespace>/buildconfigs/<name>/webhooks/<secret>/generic", "curl -X POST -k https://<openshift_api_host:port>/apis/build.openshift.io/v1/namespaces/<namespace>/buildconfigs/<name>/webhooks/<secret>/generic", "git: uri: \"<url to git repository>\" ref: \"<optional git reference>\" commit: \"<commit hash 
identifying a specific git commit>\" author: name: \"<author name>\" email: \"<author e-mail>\" committer: name: \"<committer name>\" email: \"<committer e-mail>\" message: \"<commit message>\" env: 1 - name: \"<variable name>\" value: \"<variable value>\"", "curl -H \"Content-Type: application/yaml\" --data-binary @payload_file.yaml -X POST -k https://<openshift_api_host:port>/apis/build.openshift.io/v1/namespaces/<namespace>/buildconfigs/<name>/webhooks/<secret>/generic", "oc describe bc <name>", "kind: \"ImageStream\" apiVersion: \"v1\" metadata: name: \"ruby-20-centos7\"", "strategy: sourceStrategy: from: kind: \"ImageStreamTag\" name: \"ruby-20-centos7:latest\"", "type: \"ImageChange\" 1 imageChange: {} type: \"ImageChange\" 2 imageChange: from: kind: \"ImageStreamTag\" name: \"custom-image:latest\"", "strategy: sourceStrategy: from: kind: \"DockerImage\" name: \"172.30.17.3:5001/mynamespace/ruby-20-centos7:<immutableid>\"", "type: \"ImageChange\" imageChange: from: kind: \"ImageStreamTag\" name: \"custom-image:latest\" paused: true", "apiVersion: build.openshift.io/v1 kind: BuildConfig metadata: name: bc-ict-example namespace: bc-ict-example-namespace spec: triggers: - imageChange: from: kind: ImageStreamTag name: input:latest namespace: bc-ict-example-namespace - imageChange: from: kind: ImageStreamTag name: input2:latest namespace: bc-ict-example-namespace type: ImageChange status: imageChangeTriggers: - from: name: input:latest namespace: bc-ict-example-namespace lastTriggerTime: \"2021-06-30T13:47:53Z\" lastTriggeredImageID: image-registry.openshift-image-registry.svc:5000/bc-ict-example-namespace/input@sha256:0f88ffbeb9d25525720bfa3524cb1bf0908b7f791057cf1acfae917b11266a69 - from: name: input2:latest namespace: bc-ict-example-namespace lastTriggeredImageID: image-registry.openshift-image-registry.svc:5000/bc-ict-example-namespace/input2@sha256:0f88ffbeb9d25525720bfa3524cb2ce0908b7f791057cf1acfae917b11266a69 lastVersion: 1", "Then you use the `name` and `namespace` from that build to find the corresponding image change trigger in `buildConfig.spec.triggers`.", "type: \"ConfigChange\"", "oc set triggers bc <name> --from-github", "oc set triggers bc <name> --from-image='<image>'", "oc set triggers bc <name> --from-bitbucket --remove", "oc set triggers --help", "postCommit: script: \"bundle exec rake test --verbose\"", "postCommit: command: [\"/bin/bash\", \"-c\", \"bundle exec rake test --verbose\"]", "postCommit: command: [\"bundle\", \"exec\", \"rake\", \"test\"] args: [\"--verbose\"]", "oc set build-hook bc/mybc --post-commit --command -- bundle exec rake test --verbose", "oc set build-hook bc/mybc --post-commit --script=\"bundle exec rake test --verbose\"", "apiVersion: \"v1\" kind: \"BuildConfig\" metadata: name: \"sample-build\" spec: resources: limits: cpu: \"100m\" 1 memory: \"256Mi\" 2", "resources: requests: 1 cpu: \"100m\" memory: \"256Mi\"", "spec: completionDeadlineSeconds: 1800", "apiVersion: \"v1\" kind: \"BuildConfig\" metadata: name: \"sample-build\" spec: nodeSelector: 1 key1: value1 key2: value2", "apiVersion: build.openshift.io/v1 kind: BuildConfig metadata: name: artifact-build spec: output: to: kind: ImageStreamTag name: artifact-image:latest source: git: uri: https://github.com/openshift/openshift-jee-sample.git ref: \"master\" strategy: sourceStrategy: from: kind: ImageStreamTag name: wildfly:10.1 namespace: openshift", "apiVersion: build.openshift.io/v1 kind: BuildConfig metadata: name: image-build spec: output: to: kind: ImageStreamTag name: 
image-build:latest source: dockerfile: |- FROM jee-runtime:latest COPY ROOT.war /deployments/ROOT.war images: - from: 1 kind: ImageStreamTag name: artifact-image:latest paths: 2 - sourcePath: /wildfly/standalone/deployments/ROOT.war destinationDir: \".\" strategy: dockerStrategy: from: 3 kind: ImageStreamTag name: jee-runtime:latest triggers: - imageChange: {} type: ImageChange", "apiVersion: \"v1\" kind: \"BuildConfig\" metadata: name: \"sample-build\" spec: successfulBuildsHistoryLimit: 2 1 failedBuildsHistoryLimit: 2 2", "oc tag --source=docker registry.redhat.io/ubi9/ubi:latest ubi9:latest -n openshift", "apiVersion: image.openshift.io/v1 kind: ImageStream metadata: name: ubi9 namespace: openshift spec: tags: - from: kind: DockerImage name: registry.redhat.io/ubi9/ubi:latest name: latest referencePolicy: type: Source", "oc tag --source=docker registry.redhat.io/ubi9/ubi:latest ubi:latest", "apiVersion: image.openshift.io/v1 kind: ImageStream metadata: name: ubi9 spec: tags: - from: kind: DockerImage name: registry.redhat.io/ubi9/ubi:latest name: latest referencePolicy: type: Source", "cat << EOF > secret-template.txt kind: Secret apiVersion: v1 metadata: name: etc-pki-entitlement type: Opaque data: {{ range \\USDkey, \\USDvalue := .data }} {{ \\USDkey }}: {{ \\USDvalue }} {{ end }} EOF oc get secret etc-pki-entitlement -n openshift-config-managed -o=go-template-file --template=secret-template.txt | oc apply -f -", "strategy: dockerStrategy: from: kind: ImageStreamTag name: ubi9:latest volumes: - name: etc-pki-entitlement mounts: - destinationPath: /etc/pki/entitlement source: type: Secret secret: secretName: etc-pki-entitlement", "FROM registry.redhat.io/ubi9/ubi:latest RUN rm -rf /etc/rhsm-host 1 RUN yum --enablerepo=codeready-builder-for-rhel-9-x86_64-rpms install \\ 2 nss_wrapper uid_wrapper -y && yum clean all -y RUN ln -s /run/secrets/rhsm /etc/rhsm-host 3", "[test-<name>] name=test-<number> baseurl = https://satellite.../content/dist/rhel/server/7/7Server/x86_64/os enabled=1 gpgcheck=0 sslverify=0 sslclientkey = /etc/pki/entitlement/...-key.pem sslclientcert = /etc/pki/entitlement/....pem", "oc create configmap yum-repos-d --from-file /path/to/satellite.repo", "strategy: dockerStrategy: from: kind: ImageStreamTag name: ubi9:latest volumes: - name: yum-repos-d mounts: - destinationPath: /etc/yum.repos.d source: type: ConfigMap configMap: name: yum-repos-d - name: etc-pki-entitlement mounts: - destinationPath: /etc/pki/entitlement source: type: Secret secret: secretName: etc-pki-entitlement", "FROM registry.redhat.io/ubi9/ubi:latest RUN rm -rf /etc/rhsm-host 1 RUN yum --enablerepo=codeready-builder-for-rhel-9-x86_64-rpms install \\ 2 nss_wrapper uid_wrapper -y && yum clean all -y RUN ln -s /run/secrets/rhsm /etc/rhsm-host 3", "oc apply -f - <<EOF kind: SharedSecret apiVersion: sharedresource.openshift.io/v1alpha1 metadata: name: etc-pki-entitlement spec: secretRef: name: etc-pki-entitlement namespace: openshift-config-managed EOF", "oc apply -f - <<EOF apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: builder-etc-pki-entitlement namespace: build-namespace rules: - apiGroups: - sharedresource.openshift.io resources: - sharedsecrets resourceNames: - etc-pki-entitlement verbs: - use EOF", "oc create rolebinding builder-etc-pki-entitlement --role=builder-etc-pki-entitlement --serviceaccount=build-namespace:builder", "apiVersion: build.openshift.io/v1 kind: BuildConfig metadata: name: uid-wrapper-rhel9 namespace: build-namespace spec: runPolicy: Serial source: 
dockerfile: | FROM registry.redhat.io/ubi9/ubi:latest RUN rm -rf /etc/rhsm-host 1 RUN yum --enablerepo=codeready-builder-for-rhel-9-x86_64-rpms install \\ 2 nss_wrapper uid_wrapper -y && yum clean all -y RUN ln -s /run/secrets/rhsm /etc/rhsm-host 3 strategy: type: Docker dockerStrategy: volumes: - mounts: - destinationPath: \"/etc/pki/entitlement\" name: etc-pki-entitlement source: csi: driver: csi.sharedresource.openshift.io readOnly: true 4 volumeAttributes: sharedSecret: etc-pki-entitlement 5 type: CSI", "oc start-build uid-wrapper-rhel9 -n build-namespace -F", "oc annotate clusterrolebinding.rbac system:build-strategy-docker-binding 'rbac.authorization.kubernetes.io/autoupdate=false' --overwrite", "oc adm policy remove-cluster-role-from-group system:build-strategy-docker system:authenticated", "oc get clusterrole admin -o yaml | grep \"builds/docker\"", "oc get clusterrole edit -o yaml | grep \"builds/docker\"", "oc adm policy add-cluster-role-to-user system:build-strategy-docker devuser", "oc adm policy add-role-to-user system:build-strategy-docker devuser -n devproject", "oc edit build.config.openshift.io/cluster", "apiVersion: config.openshift.io/v1 kind: Build 1 metadata: annotations: release.openshift.io/create-only: \"true\" creationTimestamp: \"2019-05-17T13:44:26Z\" generation: 2 name: cluster resourceVersion: \"107233\" selfLink: /apis/config.openshift.io/v1/builds/cluster uid: e2e9cc14-78a9-11e9-b92b-06d6c7da38dc spec: buildDefaults: 2 defaultProxy: 3 httpProxy: http://proxy.com httpsProxy: https://proxy.com noProxy: internal.com env: 4 - name: envkey value: envvalue gitProxy: 5 httpProxy: http://gitproxy.com httpsProxy: https://gitproxy.com noProxy: internalgit.com imageLabels: 6 - name: labelkey value: labelvalue resources: 7 limits: cpu: 100m memory: 50Mi requests: cpu: 10m memory: 10Mi buildOverrides: 8 imageLabels: 9 - name: labelkey value: labelvalue nodeSelector: 10 selectorkey: selectorvalue tolerations: 11 - effect: NoSchedule key: node-role.kubernetes.io/builds operator: Exists", "requested access to the resource is denied", "oc describe quota", "secret/ssl-key references serviceUID 62ad25ca-d703-11e6-9d6f-0e9c0057b608, which does not match 77b6dd80-d716-11e6-9d6f-0e9c0057b60", "oc delete secret <secret_name>", "oc annotate service <service_name> service.beta.openshift.io/serving-cert-generation-error-", "oc annotate service <service_name> service.beta.openshift.io/serving-cert-generation-error-num-", "oc create configmap registry-cas -n openshift-config --from-file=myregistry.corp.com..5000=/etc/docker/certs.d/myregistry.corp.com:5000/ca.crt --from-file=otherregistry.com=/etc/docker/certs.d/otherregistry.com/ca.crt", "oc patch image.config.openshift.io/cluster --patch '{\"spec\":{\"additionalTrustedCA\":{\"name\":\"registry-cas\"}}}' --type=merge" ]
https://docs.redhat.com/en/documentation/openshift_container_platform/4.15/html-single/builds_using_buildconfig/index
4.190. net-tools
4.190. net-tools 4.190.1. RHBA-2011:1596 - net-tools bug fix update An updated net-tools package that fixes various bugs is now available for Red Hat Enterprise Linux 6. The net-tools package contains basic networking tools, including ifconfig, netstat, route, and others. Bug Fixes BZ# 705110 Prior to this update, the "hostname -i" command failed to display related network addresses when the hostname was not included in the /etc/hosts file. The "hostname -f" command had the same issue with Fully Qualified Domain Names (FQDNs). To fix this issue, new "--all-fqdns" (or "-A") and "--all-ip-addresses" (or "-I") options have been implemented for the hostname command. These options are independent of the /etc/hosts content. The "hostname -I" command now displays all network addresses for all configured network interfaces, and the "hostname -A" command displays all FQDNs for all configured network interfaces of the host. BZ# 725348 The "netstat -p" command output incorrectly displayed a number in the PID/Program name column instead of the program name. The code has been modified to fix this issue, and netstat now shows the correct program name in this column. BZ# 732984 The netstat utility truncated IPv6 UDP sockets when the "--notrim" (or "-T") option was specified. This update fixes the issue, and whole IPv6 addresses are now displayed for UDP sockets when using netstat with this option. BZ# 680837 The route(8) manual page now includes an explicit description of the "mss M" option. BZ# 694766 The SYNOPSIS section of the plipconfig(8) manual page and the usage output of the plipconfig command have been modified to show correct plipconfig options. All users of net-tools are advised to upgrade to this updated package, which resolves these issues. 4.190.2. RHBA-2012:0555 - net-tools bug fix update Updated net-tools packages that fix one bug are now available for Red Hat Enterprise Linux 6. The net-tools packages contain basic networking tools, including hostname, ifconfig, netstat, and route. Bug Fix BZ# 816375 Running the "hostname" command with the "-A, --all-fqdns" or "-I, --all-ip-addresses" option to display all Fully Qualified Domain Names (FQDNs) or network addresses of the host failed with the "Hostname lookup failure" error if the machine's host name was not resolved in DNS. With this update, these options are no longer dependent on name resolution; all FQDNs and network addresses of the host are now displayed as expected even if the host name cannot be resolved or is not included in the /etc/hosts file. All users of net-tools are advised to upgrade to these updated packages, which fix this bug.
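For illustration, the options introduced by these updates can be exercised as follows. This is a minimal sketch; the comments summarize only the behavior described in the errata above, not the full man-page semantics.
hostname -I    # print all network addresses for all configured interfaces, independent of the /etc/hosts content
hostname -A    # print all FQDNs for all configured interfaces of the host
netstat -p     # the PID/Program name column now shows the program name instead of only a number
netstat -Tun   # -T (--notrim) keeps addresses untruncated, including IPv6 UDP sockets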
null
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/6.2_technical_notes/net-tools
CLI tools
CLI tools OpenShift Container Platform 4.14 Learning how to use the command-line tools for OpenShift Container Platform Red Hat OpenShift Documentation Team
null
https://docs.redhat.com/en/documentation/openshift_container_platform/4.14/html/cli_tools/index
Image APIs
Image APIs OpenShift Container Platform 4.13 Reference guide for image APIs Red Hat OpenShift Documentation Team
null
https://docs.redhat.com/en/documentation/openshift_container_platform/4.13/html/image_apis/index
Virtualization
Virtualization OpenShift Container Platform 4.12 OpenShift Virtualization installation, usage, and release notes Red Hat OpenShift Documentation Team
null
https://docs.redhat.com/en/documentation/openshift_container_platform/4.12/html/virtualization/index
35.2. Finishing an Upgrade
35.2. Finishing an Upgrade Important Once you have rebooted your system after performing an upgrade, you should also perform a manual system update. Consult Section 35.1, "Updating Your System" for more information. If you chose to upgrade your system from a previous release rather than perform a fresh installation, you may want to examine the differences in the package set. Section 9.12.2, "Upgrading Using the Installer", Section 16.14.2, "Upgrading Using the Installer", or Section 23.12.1, "Upgrading Using the Installer" (depending on your system architecture) advised you to create a package listing for your original system. You can now use that listing to determine how to bring your new system close to the original system state. Most software repository configurations are stored in packages that end with the term release. Check the old package list for the repositories that were installed: If necessary, retrieve and install these packages from their original sources on the Internet. Follow the instructions at the originating site to install the repository configuration packages for use by yum and other software management tools on your Red Hat Enterprise Linux system. Then run the following commands to make a list of other missing software packages: Now use the file /tmp/pkgs-to-install.txt with the yum command to restore most or all of your old software: Important Due to changes in package complements between Red Hat Enterprise Linux releases, this method might not restore all the software on your system. You can use the routines above to compare the software on your system again, and remedy any problems you find.
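The package listing referred to above is simply a file that records what was installed before the upgrade, with the package name in the first column so that the commands that follow can extract it. A minimal sketch of how such a listing could be captured on the original system is shown here; the exact query format used in the referenced sections may differ:
rpm -qa --qf '%{NAME} %{VERSION}-%{RELEASE} %{ARCH}\n' | sort > ~/old-pkglist.txt    # one line per installed package, package name first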
[ "awk '{print USD1}' ~/old-pkglist.txt | grep 'releaseUSD'", "awk '{print USD1}' ~/old-pkglist.txt | sort | uniq > ~/old-pkgnames.txt rpm -qa --qf '%{NAME}\\n' | sort | uniq > ~/new-pkgnames.txt diff -u ~/old-pkgnames.txt ~/new-pkgnames.txt | grep '^-' | sed 's/^-//' > /tmp/pkgs-to-install.txt", "su -c 'yum install `cat /tmp/pkgs-to-install.txt`'" ]
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/installation_guide/sn-finishing-upgrade
Chapter 9. Installing a private cluster on GCP
Chapter 9. Installing a private cluster on GCP In OpenShift Container Platform version 4.17, you can install a private cluster into an existing VPC on Google Cloud Platform (GCP). The installation program provisions the rest of the required infrastructure, which you can further customize. To customize the installation, you modify parameters in the install-config.yaml file before you install the cluster. 9.1. Prerequisites You reviewed details about the OpenShift Container Platform installation and update processes. You read the documentation on selecting a cluster installation method and preparing it for users . You configured a GCP project to host the cluster. If you use a firewall, you configured it to allow the sites that your cluster requires access to. 9.2. Private clusters You can deploy a private OpenShift Container Platform cluster that does not expose external endpoints. Private clusters are accessible from only an internal network and are not visible to the internet. By default, OpenShift Container Platform is provisioned to use publicly-accessible DNS and endpoints. A private cluster sets the DNS, Ingress Controller, and API server to private when you deploy your cluster. This means that the cluster resources are only accessible from your internal network and are not visible to the internet. Important If the cluster has any public subnets, load balancer services created by administrators might be publicly accessible. To ensure cluster security, verify that these services are explicitly annotated as private. To deploy a private cluster, you must: Use existing networking that meets your requirements. Your cluster resources might be shared between other clusters on the network. Deploy from a machine that has access to: The API services for the cloud to which you provision. The hosts on the network that you provision. The internet to obtain installation media. You can use any machine that meets these access requirements and follows your company's guidelines. For example, this machine can be a bastion host on your cloud network or a machine that has access to the network through a VPN. 9.2.1. Private clusters in GCP To create a private cluster on Google Cloud Platform (GCP), you must provide an existing private VPC and subnets to host the cluster. The installation program must also be able to resolve the DNS records that the cluster requires. The installation program configures the Ingress Operator and API server for only internal traffic. The cluster still requires access to internet to access the GCP APIs. The following items are not required or created when you install a private cluster: Public subnets Public network load balancers, which support public ingress A public DNS zone that matches the baseDomain for the cluster The installation program does use the baseDomain that you specify to create a private DNS zone and the required records for the cluster. The cluster is configured so that the Operators do not create public records for the cluster and all cluster machines are placed in the private subnets that you specify. Because it is not possible to limit access to external load balancers based on source tags, the private cluster uses only internal load balancers to allow access to internal instances. The internal load balancer relies on instance groups rather than the target pools that the network load balancers use. The installation program creates instance groups for each zone, even if there is no instance in that group. The cluster IP address is internal only. 
One forwarding rule manages both the Kubernetes API and machine config server ports. The backend service is comprised of each zone's instance group and, while it exists, the bootstrap instance group. The firewall uses a single rule that is based on only internal source ranges. 9.2.1.1. Limitations No health check for the Machine config server, /healthz , runs because of a difference in load balancer functionality. Two internal load balancers cannot share a single IP address, but two network load balancers can share a single external IP address. Instead, the health of an instance is determined entirely by the /readyz check on port 6443. 9.3. About using a custom VPC In OpenShift Container Platform 4.17, you can deploy a cluster into an existing VPC in Google Cloud Platform (GCP). If you do, you must also use existing subnets within the VPC and routing rules. By deploying OpenShift Container Platform into an existing GCP VPC, you might be able to avoid limit constraints in new accounts or more easily abide by the operational constraints that your company's guidelines set. This is a good option to use if you cannot obtain the infrastructure creation permissions that are required to create the VPC yourself. 9.3.1. Requirements for using your VPC The installation program will no longer create the following components: VPC Subnets Cloud router Cloud NAT NAT IP addresses If you use a custom VPC, you must correctly configure it and its subnets for the installation program and the cluster to use. The installation program cannot subdivide network ranges for the cluster to use, set route tables for the subnets, or set VPC options like DHCP, so you must do so before you install the cluster. Your VPC and subnets must meet the following characteristics: The VPC must be in the same GCP project that you deploy the OpenShift Container Platform cluster to. To allow access to the internet from the control plane and compute machines, you must configure cloud NAT on the subnets to allow egress to it. These machines do not have a public address. Even if you do not require access to the internet, you must allow egress to the VPC network to obtain the installation program and images. Because multiple cloud NATs cannot be configured on the shared subnets, the installation program cannot configure it. To ensure that the subnets that you provide are suitable, the installation program confirms the following data: All the subnets that you specify exist and belong to the VPC that you specified. The subnet CIDRs belong to the machine CIDR. You must provide a subnet to deploy the cluster control plane and compute machines to. You can use the same subnet for both machine types. If you destroy a cluster that uses an existing VPC, the VPC is not deleted. 9.3.2. Division of permissions Starting with OpenShift Container Platform 4.3, you do not need all of the permissions that are required for an installation program-provisioned infrastructure cluster to deploy a cluster. This change mimics the division of permissions that you might have at your company: some individuals can create different resources in your clouds than others. For example, you might be able to create application-specific items, like instances, buckets, and load balancers, but not networking-related components such as VPCs, subnets, or Ingress rules. 
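For illustration, the VPC, subnets, Cloud Router, and Cloud NAT that the installation program no longer creates are typically provisioned ahead of time, often by a network administrator who holds the networking permissions discussed in this section. The following gcloud sketch uses hypothetical resource names, region, and CIDR range, and the exact flags can vary between gcloud versions:
gcloud compute networks create example-vpc --subnet-mode=custom                                 # custom-mode VPC to host the cluster
gcloud compute networks subnets create example-subnet \
    --network=example-vpc --region=us-central1 --range=10.0.0.0/16                              # subnet for control plane and compute machines
gcloud compute routers create example-router --network=example-vpc --region=us-central1
gcloud compute routers nats create example-nat --router=example-router --region=us-central1 \
    --auto-allocate-nat-external-ips --nat-all-subnet-ip-ranges                                 # Cloud NAT gives the private subnets egress to reach GCP APIs and pull images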
The GCP credentials that you use when you create your cluster do not need the networking permissions that are required to make VPCs and core networking components within the VPC, such as subnets, routing tables, internet gateways, NAT, and VPN. You still need permission to make the application resources that the machines within the cluster require, such as load balancers, security groups, storage, and nodes. 9.3.3. Isolation between clusters If you deploy OpenShift Container Platform to an existing network, the isolation of cluster services is preserved by firewall rules that reference the machines in your cluster by the cluster's infrastructure ID. Only traffic within the cluster is allowed. If you deploy multiple clusters to the same VPC, the following components might share access between clusters: The API, which is globally available with an external publishing strategy or available throughout the network in an internal publishing strategy Debugging tools, such as ports on VM instances that are open to the machine CIDR for SSH and ICMP access 9.4. Internet access for OpenShift Container Platform In OpenShift Container Platform 4.17, you require access to the internet to install your cluster. You must have internet access to: Access OpenShift Cluster Manager to download the installation program and perform subscription management. If the cluster has internet access and you do not disable Telemetry, that service automatically entitles your cluster. Access Quay.io to obtain the packages that are required to install your cluster. Obtain the packages that are required to perform cluster updates. Important If your cluster cannot have direct internet access, you can perform a restricted network installation on some types of infrastructure that you provision. During that process, you download the required content and use it to populate a mirror registry with the installation packages. With some installation types, the environment that you install your cluster in will not require internet access. Before you update the cluster, you update the content of the mirror registry. 9.5. Generating a key pair for cluster node SSH access During an OpenShift Container Platform installation, you can provide an SSH public key to the installation program. The key is passed to the Red Hat Enterprise Linux CoreOS (RHCOS) nodes through their Ignition config files and is used to authenticate SSH access to the nodes. The key is added to the ~/.ssh/authorized_keys list for the core user on each node, which enables password-less authentication. After the key is passed to the nodes, you can use the key pair to SSH in to the RHCOS nodes as the user core . To access the nodes through SSH, the private key identity must be managed by SSH for your local user. If you want to SSH in to your cluster nodes to perform installation debugging or disaster recovery, you must provide the SSH public key during the installation process. The ./openshift-install gather command also requires the SSH public key to be in place on the cluster nodes. Important Do not skip this procedure in production environments, where disaster recovery and debugging is required. Note You must use a local key, not one that you configured with platform-specific approaches such as AWS key pairs . Procedure If you do not have an existing SSH key pair on your local machine to use for authentication onto your cluster nodes, create one. 
For example, on a computer that uses a Linux operating system, run the following command: USD ssh-keygen -t ed25519 -N '' -f <path>/<file_name> 1 1 Specify the path and file name, such as ~/.ssh/id_ed25519 , of the new SSH key. If you have an existing key pair, ensure your public key is in your ~/.ssh directory. Note If you plan to install an OpenShift Container Platform cluster that uses the RHEL cryptographic libraries that have been submitted to NIST for FIPS 140-2/140-3 Validation on only the x86_64 , ppc64le , and s390x architectures, do not create a key that uses the ed25519 algorithm. Instead, create a key that uses the rsa or ecdsa algorithm. View the public SSH key: USD cat <path>/<file_name>.pub For example, run the following to view the ~/.ssh/id_ed25519.pub public key: USD cat ~/.ssh/id_ed25519.pub Add the SSH private key identity to the SSH agent for your local user, if it has not already been added. SSH agent management of the key is required for password-less SSH authentication onto your cluster nodes, or if you want to use the ./openshift-install gather command. Note On some distributions, default SSH private key identities such as ~/.ssh/id_rsa and ~/.ssh/id_dsa are managed automatically. If the ssh-agent process is not already running for your local user, start it as a background task: USD eval "USD(ssh-agent -s)" Example output Agent pid 31874 Note If your cluster is in FIPS mode, only use FIPS-compliant algorithms to generate the SSH key. The key must be either RSA or ECDSA. Add your SSH private key to the ssh-agent : USD ssh-add <path>/<file_name> 1 1 Specify the path and file name for your SSH private key, such as ~/.ssh/id_ed25519 Example output Identity added: /home/<you>/<path>/<file_name> (<computer_name>) Next steps When you install OpenShift Container Platform, provide the SSH public key to the installation program. 9.6. Obtaining the installation program Before you install OpenShift Container Platform, download the installation file on the host you are using for installation. Prerequisites You have a computer that runs Linux or macOS, with 500 MB of local disk space. Procedure Go to the Cluster Type page on the Red Hat Hybrid Cloud Console. If you have a Red Hat account, log in with your credentials. If you do not, create an account. Tip You can also download the binaries for a specific OpenShift Container Platform release . Select your infrastructure provider from the Run it yourself section of the page. Select your host operating system and architecture from the dropdown menus under OpenShift Installer and click Download Installer . Place the downloaded file in the directory where you want to store the installation configuration files. Important The installation program creates several files on the computer that you use to install your cluster. You must keep the installation program and the files that the installation program creates after you finish installing the cluster. Both of the files are required to delete the cluster. Deleting the files created by the installation program does not remove your cluster, even if the cluster failed during installation. To remove your cluster, complete the OpenShift Container Platform uninstallation procedures for your specific cloud provider. Extract the installation program. For example, on a computer that uses a Linux operating system, run the following command: USD tar -xvf openshift-install-linux.tar.gz Download your installation pull secret from Red Hat OpenShift Cluster Manager .
This pull secret allows you to authenticate with the services that are provided by the included authorities, including Quay.io, which serves the container images for OpenShift Container Platform components. Tip Alternatively, you can retrieve the installation program from the Red Hat Customer Portal , where you can specify a version of the installation program to download. However, you must have an active subscription to access this page. 9.7. Manually creating the installation configuration file Installing the cluster requires that you manually create the installation configuration file. Prerequisites You have an SSH public key on your local machine to provide to the installation program. The key will be used for SSH authentication onto your cluster nodes for debugging and disaster recovery. You have obtained the OpenShift Container Platform installation program and the pull secret for your cluster. Procedure Create an installation directory to store your required installation assets in: USD mkdir <installation_directory> Important You must create a directory. Some installation assets, like bootstrap X.509 certificates, have short expiration intervals, so you must not reuse an installation directory. If you want to reuse individual files from another cluster installation, you can copy them into your directory. However, the file names for the installation assets might change between releases. Use caution when copying installation files from an earlier OpenShift Container Platform version. Customize the sample install-config.yaml file template that is provided and save it in the <installation_directory> . Note You must name this configuration file install-config.yaml . Back up the install-config.yaml file so that you can use it to install multiple clusters. Important The install-config.yaml file is consumed during the next step of the installation process. You must back it up now. Additional resources Installation configuration parameters for GCP 9.7.1. Minimum resource requirements for cluster installation Each cluster machine must meet the following minimum requirements:
Table 9.1. Minimum resource requirements
Machine         Operating System                vCPU [1]   Virtual RAM   Storage   Input/Output Per Second (IOPS) [2]
Bootstrap       RHCOS                           4          16 GB         100 GB    300
Control plane   RHCOS                           4          16 GB         100 GB    300
Compute         RHCOS, RHEL 8.6 and later [3]   2          8 GB          100 GB    300
[1] One vCPU is equivalent to one physical core when simultaneous multithreading (SMT), or Hyper-Threading, is not enabled. When enabled, use the following formula to calculate the corresponding ratio: (threads per core x cores) x sockets = vCPUs.
[2] OpenShift Container Platform and Kubernetes are sensitive to disk performance, and faster storage is recommended, particularly for etcd on the control plane nodes which require a 10 ms p99 fsync duration. Note that on many cloud platforms, storage size and IOPS scale together, so you might need to over-allocate storage volume to obtain sufficient performance.
[3] As with all user-provisioned installations, if you choose to use RHEL compute machines in your cluster, you take responsibility for all operating system life cycle management and maintenance, including performing system updates, applying patches, and completing all other required tasks. Use of RHEL 7 compute machines is deprecated and has been removed in OpenShift Container Platform 4.10 and later.
Note As of OpenShift Container Platform version 4.13, RHCOS is based on RHEL version 9.2, which updates the micro-architecture requirements.
The following list contains the minimum instruction set architectures (ISA) that each architecture requires: x86-64 architecture requires x86-64-v2 ISA ARM64 architecture requires ARMv8.0-A ISA IBM Power architecture requires Power 9 ISA s390x architecture requires z14 ISA For more information, see Architectures (RHEL documentation). If an instance type for your platform meets the minimum requirements for cluster machines, it is supported to use in OpenShift Container Platform. Additional resources Optimizing storage 9.7.2. Tested instance types for GCP The following Google Cloud Platform instance types have been tested with OpenShift Container Platform. Example 9.1. Machine series A2 A3 C2 C2D C3 C3D E2 M1 N1 N2 N2D N4 Tau T2D 9.7.3. Tested instance types for GCP on 64-bit ARM infrastructures The following Google Cloud Platform (GCP) 64-bit ARM instance types have been tested with OpenShift Container Platform. Example 9.2. Machine series for 64-bit ARM machines Tau T2A 9.7.4. Using custom machine types Using a custom machine type to install a OpenShift Container Platform cluster is supported. Consider the following when using a custom machine type: Similar to predefined instance types, custom machine types must meet the minimum resource requirements for control plane and compute machines. For more information, see "Minimum resource requirements for cluster installation". The name of the custom machine type must adhere to the following syntax: custom-<number_of_cpus>-<amount_of_memory_in_mb> For example, custom-6-20480 . As part of the installation process, you specify the custom machine type in the install-config.yaml file. Sample install-config.yaml file with a custom machine type compute: - architecture: amd64 hyperthreading: Enabled name: worker platform: gcp: type: custom-6-20480 replicas: 2 controlPlane: architecture: amd64 hyperthreading: Enabled name: master platform: gcp: type: custom-6-20480 replicas: 3 9.7.5. Enabling Shielded VMs You can use Shielded VMs when installing your cluster. Shielded VMs have extra security features including secure boot, firmware and integrity monitoring, and rootkit detection. For more information, see Google's documentation on Shielded VMs . Note Shielded VMs are currently not supported on clusters with 64-bit ARM infrastructures. Procedure Use a text editor to edit the install-config.yaml file prior to deploying your cluster and add one of the following stanzas: To use shielded VMs for only control plane machines: controlPlane: platform: gcp: secureBoot: Enabled To use shielded VMs for only compute machines: compute: - platform: gcp: secureBoot: Enabled To use shielded VMs for all machines: platform: gcp: defaultMachinePlatform: secureBoot: Enabled 9.7.6. Enabling Confidential VMs You can use Confidential VMs when installing your cluster. Confidential VMs encrypt data while it is being processed. For more information, see Google's documentation on Confidential Computing . You can enable Confidential VMs and Shielded VMs at the same time, although they are not dependent on each other. Note Confidential VMs are currently not supported on 64-bit ARM architectures. Procedure Use a text editor to edit the install-config.yaml file prior to deploying your cluster and add one of the following stanzas: To use confidential VMs for only control plane machines: controlPlane: platform: gcp: confidentialCompute: Enabled 1 type: n2d-standard-8 2 onHostMaintenance: Terminate 3 1 Enable confidential VMs. 2 Specify a machine type that supports Confidential VMs. 
Confidential VMs require the N2D or C2D series of machine types. For more information on supported machine types, see Supported operating systems and machine types . 3 Specify the behavior of the VM during a host maintenance event, such as a hardware or software update. For a machine that uses Confidential VM, this value must be set to Terminate , which stops the VM. Confidential VMs do not support live VM migration. To use confidential VMs for only compute machines: compute: - platform: gcp: confidentialCompute: Enabled type: n2d-standard-8 onHostMaintenance: Terminate To use confidential VMs for all machines: platform: gcp: defaultMachinePlatform: confidentialCompute: Enabled type: n2d-standard-8 onHostMaintenance: Terminate 9.7.7. Sample customized install-config.yaml file for GCP You can customize the install-config.yaml file to specify more details about your OpenShift Container Platform cluster's platform or modify the values of the required parameters. Important This sample YAML file is provided for reference only. You must obtain your install-config.yaml file by using the installation program and modify it. apiVersion: v1 baseDomain: example.com 1 credentialsMode: Mint 2 controlPlane: 3 4 hyperthreading: Enabled 5 name: master platform: gcp: type: n2-standard-4 zones: - us-central1-a - us-central1-c osDisk: diskType: pd-ssd diskSizeGB: 1024 encryptionKey: 6 kmsKey: name: worker-key keyRing: test-machine-keys location: global projectID: project-id tags: 7 - control-plane-tag1 - control-plane-tag2 osImage: 8 project: example-project-name name: example-image-name replicas: 3 compute: 9 10 - hyperthreading: Enabled 11 name: worker platform: gcp: type: n2-standard-4 zones: - us-central1-a - us-central1-c osDisk: diskType: pd-standard diskSizeGB: 128 encryptionKey: 12 kmsKey: name: worker-key keyRing: test-machine-keys location: global projectID: project-id tags: 13 - compute-tag1 - compute-tag2 osImage: 14 project: example-project-name name: example-image-name replicas: 3 metadata: name: test-cluster 15 networking: clusterNetwork: - cidr: 10.128.0.0/14 hostPrefix: 23 machineNetwork: - cidr: 10.0.0.0/16 networkType: OVNKubernetes 16 serviceNetwork: - 172.30.0.0/16 platform: gcp: projectID: openshift-production 17 region: us-central1 18 defaultMachinePlatform: tags: 19 - global-tag1 - global-tag2 osImage: 20 project: example-project-name name: example-image-name network: existing_vpc 21 controlPlaneSubnet: control_plane_subnet 22 computeSubnet: compute_subnet 23 pullSecret: '{"auths": ...}' 24 fips: false 25 sshKey: ssh-ed25519 AAAA... 26 publish: Internal 27 1 15 17 18 24 Required. The installation program prompts you for this value. 2 Optional: Add this parameter to force the Cloud Credential Operator (CCO) to use the specified mode. By default, the CCO uses the root credentials in the kube-system namespace to dynamically try to determine the capabilities of the credentials. For details about CCO modes, see the "About the Cloud Credential Operator" section in the Authentication and authorization guide. 3 9 If you do not provide these parameters and values, the installation program provides the default value. 4 10 The controlPlane section is a single mapping, but the compute section is a sequence of mappings. To meet the requirements of the different data structures, the first line of the compute section must begin with a hyphen, - , and the first line of the controlPlane section must not. Only one control plane pool is used. 
5 11 Whether to enable or disable simultaneous multithreading, or hyperthreading . By default, simultaneous multithreading is enabled to increase the performance of your machines' cores. You can disable it by setting the parameter value to Disabled . If you disable simultaneous multithreading in some cluster machines, you must disable it in all cluster machines. Important If you disable simultaneous multithreading, ensure that your capacity planning accounts for the dramatically decreased machine performance. Use larger machine types, such as n1-standard-8 , for your machines if you disable simultaneous multithreading. 6 12 Optional: The custom encryption key section to encrypt both virtual machines and persistent volumes. Your default compute service account must have the permissions granted to use your KMS key and have the correct IAM role assigned. The default service account name follows the service-<project_number>@compute-system.iam.gserviceaccount.com pattern. For more information about granting the correct permissions for your service account, see "Machine management" "Creating compute machine sets" "Creating a compute machine set on GCP". 7 13 19 Optional: A set of network tags to apply to the control plane or compute machine sets. The platform.gcp.defaultMachinePlatform.tags parameter will apply to both control plane and compute machines. If the compute.platform.gcp.tags or controlPlane.platform.gcp.tags parameters are set, they override the platform.gcp.defaultMachinePlatform.tags parameter. 8 14 20 Optional: A custom Red Hat Enterprise Linux CoreOS (RHCOS) that should be used to boot control plane and compute machines. The project and name parameters under platform.gcp.defaultMachinePlatform.osImage apply to both control plane and compute machines. If the project and name parameters under controlPlane.platform.gcp.osImage or compute.platform.gcp.osImage are set, they override the platform.gcp.defaultMachinePlatform.osImage parameters. 16 The cluster network plugin to install. The default value OVNKubernetes is the only supported value. 21 Specify the name of an existing VPC. 22 Specify the name of the existing subnet to deploy the control plane machines to. The subnet must belong to the VPC that you specified. 23 Specify the name of the existing subnet to deploy the compute machines to. The subnet must belong to the VPC that you specified. 25 Whether to enable or disable FIPS mode. By default, FIPS mode is not enabled. If FIPS mode is enabled, the Red Hat Enterprise Linux CoreOS (RHCOS) machines that OpenShift Container Platform runs on bypass the default Kubernetes cryptography suite and use the cryptography modules that are provided with RHCOS instead. Important To enable FIPS mode for your cluster, you must run the installation program from a Red Hat Enterprise Linux (RHEL) computer configured to operate in FIPS mode. For more information about configuring FIPS mode on RHEL, see Installing the system in FIPS mode . When running Red Hat Enterprise Linux (RHEL) or Red Hat Enterprise Linux CoreOS (RHCOS) booted in FIPS mode, OpenShift Container Platform core components use the RHEL cryptographic libraries that have been submitted to NIST for FIPS 140-2/140-3 Validation on only the x86_64, ppc64le, and s390x architectures. 26 You can optionally provide the sshKey value that you use to access the machines in your cluster. 
Note For production OpenShift Container Platform clusters on which you want to perform installation debugging or disaster recovery, specify an SSH key that your ssh-agent process uses. 27 How to publish the user-facing endpoints of your cluster. Set publish to Internal to deploy a private cluster, which cannot be accessed from the internet. The default value is External . Additional resources Enabling customer-managed encryption keys for a compute machine set 9.7.8. Create an Ingress Controller with global access on GCP You can create an Ingress Controller that has global access to a Google Cloud Platform (GCP) cluster. Global access is only available to Ingress Controllers using internal load balancers. Prerequisites You created the install-config.yaml and complete any modifications to it. Procedure Create an Ingress Controller with global access on a new GCP cluster. Change to the directory that contains the installation program and create a manifest file: USD ./openshift-install create manifests --dir <installation_directory> 1 1 For <installation_directory> , specify the name of the directory that contains the install-config.yaml file for your cluster. Create a file that is named cluster-ingress-default-ingresscontroller.yaml in the <installation_directory>/manifests/ directory: USD touch <installation_directory>/manifests/cluster-ingress-default-ingresscontroller.yaml 1 1 For <installation_directory> , specify the directory name that contains the manifests/ directory for your cluster. After creating the file, several network configuration files are in the manifests/ directory, as shown: USD ls <installation_directory>/manifests/cluster-ingress-default-ingresscontroller.yaml Example output cluster-ingress-default-ingresscontroller.yaml Open the cluster-ingress-default-ingresscontroller.yaml file in an editor and enter a custom resource (CR) that describes the Operator configuration you want: Sample clientAccess configuration to Global apiVersion: operator.openshift.io/v1 kind: IngressController metadata: name: default namespace: openshift-ingress-operator spec: endpointPublishingStrategy: loadBalancer: providerParameters: gcp: clientAccess: Global 1 type: GCP scope: Internal 2 type: LoadBalancerService 1 Set gcp.clientAccess to Global . 2 Global access is only available to Ingress Controllers using internal load balancers. 9.7.9. Configuring the cluster-wide proxy during installation Production environments can deny direct access to the internet and instead have an HTTP or HTTPS proxy available. You can configure a new OpenShift Container Platform cluster to use a proxy by configuring the proxy settings in the install-config.yaml file. Prerequisites You have an existing install-config.yaml file. You reviewed the sites that your cluster requires access to and determined whether any of them need to bypass the proxy. By default, all cluster egress traffic is proxied, including calls to hosting cloud provider APIs. You added sites to the Proxy object's spec.noProxy field to bypass the proxy if necessary. Note The Proxy object status.noProxy field is populated with the values of the networking.machineNetwork[].cidr , networking.clusterNetwork[].cidr , and networking.serviceNetwork[] fields from your installation configuration. For installations on Amazon Web Services (AWS), Google Cloud Platform (GCP), Microsoft Azure, and Red Hat OpenStack Platform (RHOSP), the Proxy object status.noProxy field is also populated with the instance metadata endpoint ( 169.254.169.254 ). 
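After the cluster is installed, you can optionally confirm which values were populated by inspecting the cluster Proxy object directly. This is a quick sanity check rather than part of the documented procedure, and it assumes you have already exported the cluster kubeconfig as described later in this chapter:

$ oc get proxy/cluster -o jsonpath='{.status.noProxy}'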
Procedure Edit your install-config.yaml file and add the proxy settings. For example: apiVersion: v1 baseDomain: my.domain.com proxy: httpProxy: http://<username>:<pswd>@<ip>:<port> 1 httpsProxy: https://<username>:<pswd>@<ip>:<port> 2 noProxy: example.com 3 additionalTrustBundle: | 4 -----BEGIN CERTIFICATE----- <MY_TRUSTED_CA_CERT> -----END CERTIFICATE----- additionalTrustBundlePolicy: <policy_to_add_additionalTrustBundle> 5 1 A proxy URL to use for creating HTTP connections outside the cluster. The URL scheme must be http . 2 A proxy URL to use for creating HTTPS connections outside the cluster. 3 A comma-separated list of destination domain names, IP addresses, or other network CIDRs to exclude from proxying. Preface a domain with . to match subdomains only. For example, .y.com matches x.y.com , but not y.com . Use * to bypass the proxy for all destinations. 4 If provided, the installation program generates a config map that is named user-ca-bundle in the openshift-config namespace that contains one or more additional CA certificates that are required for proxying HTTPS connections. The Cluster Network Operator then creates a trusted-ca-bundle config map that merges these contents with the Red Hat Enterprise Linux CoreOS (RHCOS) trust bundle, and this config map is referenced in the trustedCA field of the Proxy object. The additionalTrustBundle field is required unless the proxy's identity certificate is signed by an authority from the RHCOS trust bundle. 5 Optional: The policy to determine the configuration of the Proxy object to reference the user-ca-bundle config map in the trustedCA field. The allowed values are Proxyonly and Always . Use Proxyonly to reference the user-ca-bundle config map only when http/https proxy is configured. Use Always to always reference the user-ca-bundle config map. The default value is Proxyonly . Note The installation program does not support the proxy readinessEndpoints field. Note If the installer times out, restart and then complete the deployment by using the wait-for command of the installer. For example: USD ./openshift-install wait-for install-complete --log-level debug Save the file and reference it when installing OpenShift Container Platform. The installation program creates a cluster-wide proxy that is named cluster that uses the proxy settings in the provided install-config.yaml file. If no proxy settings are provided, a cluster Proxy object is still created, but it will have a nil spec . Note Only the Proxy object named cluster is supported, and no additional proxies can be created. 9.8. Installing the OpenShift CLI You can install the OpenShift CLI ( oc ) to interact with OpenShift Container Platform from a command-line interface. You can install oc on Linux, Windows, or macOS. Important If you installed an earlier version of oc , you cannot use it to complete all of the commands in OpenShift Container Platform 4.17. Download and install the new version of oc . Installing the OpenShift CLI on Linux You can install the OpenShift CLI ( oc ) binary on Linux by using the following procedure. Procedure Navigate to the OpenShift Container Platform downloads page on the Red Hat Customer Portal. Select the architecture from the Product Variant drop-down list. Select the appropriate version from the Version drop-down list. Click Download Now to the OpenShift v4.17 Linux Clients entry and save the file. Unpack the archive: USD tar xvf <file> Place the oc binary in a directory that is on your PATH . 
To check your PATH , execute the following command: USD echo USDPATH Verification After you install the OpenShift CLI, it is available using the oc command: USD oc <command> Installing the OpenShift CLI on Windows You can install the OpenShift CLI ( oc ) binary on Windows by using the following procedure. Procedure Navigate to the OpenShift Container Platform downloads page on the Red Hat Customer Portal. Select the appropriate version from the Version drop-down list. Click Download Now to the OpenShift v4.17 Windows Client entry and save the file. Unzip the archive with a ZIP program. Move the oc binary to a directory that is on your PATH . To check your PATH , open the command prompt and execute the following command: C:\> path Verification After you install the OpenShift CLI, it is available using the oc command: C:\> oc <command> Installing the OpenShift CLI on macOS You can install the OpenShift CLI ( oc ) binary on macOS by using the following procedure. Procedure Navigate to the OpenShift Container Platform downloads page on the Red Hat Customer Portal. Select the appropriate version from the Version drop-down list. Click Download Now to the OpenShift v4.17 macOS Clients entry and save the file. Note For macOS arm64, choose the OpenShift v4.17 macOS arm64 Client entry. Unpack and unzip the archive. Move the oc binary to a directory on your PATH. To check your PATH , open a terminal and execute the following command: USD echo USDPATH Verification Verify your installation by using an oc command: USD oc <command> 9.9. Alternatives to storing administrator-level secrets in the kube-system project By default, administrator secrets are stored in the kube-system project. If you configured the credentialsMode parameter in the install-config.yaml file to Manual , you must use one of the following alternatives: To manage long-term cloud credentials manually, follow the procedure in Manually creating long-term credentials . To implement short-term credentials that are managed outside the cluster for individual components, follow the procedures in Configuring a GCP cluster to use short-term credentials . 9.9.1. Manually creating long-term credentials The Cloud Credential Operator (CCO) can be put into manual mode prior to installation in environments where the cloud identity and access management (IAM) APIs are not reachable, or the administrator prefers not to store an administrator-level credential secret in the cluster kube-system namespace. Procedure Add the following granular permissions to the GCP account that the installation program uses: Example 9.3. Required GCP permissions compute.machineTypes.list compute.regions.list compute.zones.list dns.changes.create dns.changes.get dns.managedZones.create dns.managedZones.delete dns.managedZones.get dns.managedZones.list dns.networks.bindPrivateDNSZone dns.resourceRecordSets.create dns.resourceRecordSets.delete dns.resourceRecordSets.list If you did not set the credentialsMode parameter in the install-config.yaml configuration file to Manual , modify the value as shown: Sample configuration file snippet apiVersion: v1 baseDomain: example.com credentialsMode: Manual # ... If you have not previously created installation manifest files, do so by running the following command: USD openshift-install create manifests --dir <installation_directory> where <installation_directory> is the directory in which the installation program creates files. 
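If you want to confirm that the manifest files were generated before you continue, you can list the contents of the new manifests directory. This is only an optional check; the exact file names vary by release:

$ ls <installation_directory>/manifests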
Set a USDRELEASE_IMAGE variable with the release image from your installation file by running the following command: USD RELEASE_IMAGE=USD(./openshift-install version | awk '/release image/ {print USD3}') Extract the list of CredentialsRequest custom resources (CRs) from the OpenShift Container Platform release image by running the following command: USD oc adm release extract \ --from=USDRELEASE_IMAGE \ --credentials-requests \ --included \ 1 --install-config=<path_to_directory_with_installation_configuration>/install-config.yaml \ 2 --to=<path_to_directory_for_credentials_requests> 3 1 The --included parameter includes only the manifests that your specific cluster configuration requires. 2 Specify the location of the install-config.yaml file. 3 Specify the path to the directory where you want to store the CredentialsRequest objects. If the specified directory does not exist, this command creates it. This command creates a YAML file for each CredentialsRequest object. Sample CredentialsRequest object apiVersion: cloudcredential.openshift.io/v1 kind: CredentialsRequest metadata: name: <component_credentials_request> namespace: openshift-cloud-credential-operator ... spec: providerSpec: apiVersion: cloudcredential.openshift.io/v1 kind: GCPProviderSpec predefinedRoles: - roles/storage.admin - roles/iam.serviceAccountUser skipServiceCheck: true ... Create YAML files for secrets in the openshift-install manifests directory that you generated previously. The secrets must be stored using the namespace and secret name defined in the spec.secretRef for each CredentialsRequest object. Sample CredentialsRequest object with secrets apiVersion: cloudcredential.openshift.io/v1 kind: CredentialsRequest metadata: name: <component_credentials_request> namespace: openshift-cloud-credential-operator ... spec: providerSpec: apiVersion: cloudcredential.openshift.io/v1 ... secretRef: name: <component_secret> namespace: <component_namespace> ... Sample Secret object apiVersion: v1 kind: Secret metadata: name: <component_secret> namespace: <component_namespace> data: service_account.json: <base64_encoded_gcp_service_account_file> Important Before upgrading a cluster that uses manually maintained credentials, you must ensure that the CCO is in an upgradeable state. 9.9.2. Configuring a GCP cluster to use short-term credentials To install a cluster that is configured to use GCP Workload Identity, you must configure the CCO utility and create the required GCP resources for your cluster. 9.9.2.1. Configuring the Cloud Credential Operator utility To create and manage cloud credentials from outside of the cluster when the Cloud Credential Operator (CCO) is operating in manual mode, extract and prepare the CCO utility ( ccoctl ) binary. Note The ccoctl utility is a Linux binary that must run in a Linux environment. Prerequisites You have access to an OpenShift Container Platform account with cluster administrator access. You have installed the OpenShift CLI ( oc ). You have added one of the following authentication options to the GCP account that the installation program uses: The IAM Workload Identity Pool Admin role. The following granular permissions: Example 9.4. 
Required GCP permissions compute.projects.get iam.googleapis.com/workloadIdentityPoolProviders.create iam.googleapis.com/workloadIdentityPoolProviders.get iam.googleapis.com/workloadIdentityPools.create iam.googleapis.com/workloadIdentityPools.delete iam.googleapis.com/workloadIdentityPools.get iam.googleapis.com/workloadIdentityPools.undelete iam.roles.create iam.roles.delete iam.roles.list iam.roles.undelete iam.roles.update iam.serviceAccounts.create iam.serviceAccounts.delete iam.serviceAccounts.getIamPolicy iam.serviceAccounts.list iam.serviceAccounts.setIamPolicy iam.workloadIdentityPoolProviders.get iam.workloadIdentityPools.delete resourcemanager.projects.get resourcemanager.projects.getIamPolicy resourcemanager.projects.setIamPolicy storage.buckets.create storage.buckets.delete storage.buckets.get storage.buckets.getIamPolicy storage.buckets.setIamPolicy storage.objects.create storage.objects.delete storage.objects.list Procedure Set a variable for the OpenShift Container Platform release image by running the following command: USD RELEASE_IMAGE=USD(./openshift-install version | awk '/release image/ {print USD3}') Obtain the CCO container image from the OpenShift Container Platform release image by running the following command: USD CCO_IMAGE=USD(oc adm release info --image-for='cloud-credential-operator' USDRELEASE_IMAGE -a ~/.pull-secret) Note Ensure that the architecture of the USDRELEASE_IMAGE matches the architecture of the environment in which you will use the ccoctl tool. Extract the ccoctl binary from the CCO container image within the OpenShift Container Platform release image by running the following command: USD oc image extract USDCCO_IMAGE \ --file="/usr/bin/ccoctl.<rhel_version>" \ 1 -a ~/.pull-secret 1 For <rhel_version> , specify the value that corresponds to the version of Red Hat Enterprise Linux (RHEL) that the host uses. If no value is specified, ccoctl.rhel8 is used by default. The following values are valid: rhel8 : Specify this value for hosts that use RHEL 8. rhel9 : Specify this value for hosts that use RHEL 9. Change the permissions to make ccoctl executable by running the following command: USD chmod 775 ccoctl.<rhel_version> Verification To verify that ccoctl is ready to use, display the help file. Use a relative file name when you run the command, for example: USD ./ccoctl.rhel9 Example output OpenShift credentials provisioning tool Usage: ccoctl [command] Available Commands: aws Manage credentials objects for AWS cloud azure Manage credentials objects for Azure gcp Manage credentials objects for Google cloud help Help about any command ibmcloud Manage credentials objects for {ibm-cloud-title} nutanix Manage credentials objects for Nutanix Flags: -h, --help help for ccoctl Use "ccoctl [command] --help" for more information about a command. 9.9.2.2. Creating GCP resources with the Cloud Credential Operator utility You can use the ccoctl gcp create-all command to automate the creation of GCP resources. Note By default, ccoctl creates objects in the directory in which the commands are run. To create the objects in a different directory, use the --output-dir flag. This procedure uses <path_to_ccoctl_output_dir> to refer to this directory. Prerequisites You must have: Extracted and prepared the ccoctl binary. 
Procedure Set a USDRELEASE_IMAGE variable with the release image from your installation file by running the following command: USD RELEASE_IMAGE=USD(./openshift-install version | awk '/release image/ {print USD3}') Extract the list of CredentialsRequest objects from the OpenShift Container Platform release image by running the following command: USD oc adm release extract \ --from=USDRELEASE_IMAGE \ --credentials-requests \ --included \ 1 --install-config=<path_to_directory_with_installation_configuration>/install-config.yaml \ 2 --to=<path_to_directory_for_credentials_requests> 3 1 The --included parameter includes only the manifests that your specific cluster configuration requires. 2 Specify the location of the install-config.yaml file. 3 Specify the path to the directory where you want to store the CredentialsRequest objects. If the specified directory does not exist, this command creates it. Note This command might take a few moments to run. Use the ccoctl tool to process all CredentialsRequest objects by running the following command: USD ccoctl gcp create-all \ --name=<name> \ 1 --region=<gcp_region> \ 2 --project=<gcp_project_id> \ 3 --credentials-requests-dir=<path_to_credentials_requests_directory> 4 1 Specify the user-defined name for all created GCP resources used for tracking. 2 Specify the GCP region in which cloud resources will be created. 3 Specify the GCP project ID in which cloud resources will be created. 4 Specify the directory containing the files of CredentialsRequest manifests to create GCP service accounts. Note If your cluster uses Technology Preview features that are enabled by the TechPreviewNoUpgrade feature set, you must include the --enable-tech-preview parameter. Verification To verify that the OpenShift Container Platform secrets are created, list the files in the <path_to_ccoctl_output_dir>/manifests directory: USD ls <path_to_ccoctl_output_dir>/manifests Example output cluster-authentication-02-config.yaml openshift-cloud-controller-manager-gcp-ccm-cloud-credentials-credentials.yaml openshift-cloud-credential-operator-cloud-credential-operator-gcp-ro-creds-credentials.yaml openshift-cloud-network-config-controller-cloud-credentials-credentials.yaml openshift-cluster-api-capg-manager-bootstrap-credentials-credentials.yaml openshift-cluster-csi-drivers-gcp-pd-cloud-credentials-credentials.yaml openshift-image-registry-installer-cloud-credentials-credentials.yaml openshift-ingress-operator-cloud-credentials-credentials.yaml openshift-machine-api-gcp-cloud-credentials-credentials.yaml You can verify that the IAM service accounts are created by querying GCP. For more information, refer to GCP documentation on listing IAM service accounts. 9.9.2.3. Incorporating the Cloud Credential Operator utility manifests To implement short-term security credentials managed outside the cluster for individual components, you must move the manifest files that the Cloud Credential Operator utility ( ccoctl ) created to the correct directories for the installation program. Prerequisites You have configured an account with the cloud platform that hosts your cluster. You have configured the Cloud Credential Operator utility ( ccoctl ). You have created the cloud provider resources that are required for your cluster with the ccoctl utility. Procedure Add the following granular permissions to the GCP account that the installation program uses: Example 9.5. 
Required GCP permissions compute.machineTypes.list compute.regions.list compute.zones.list dns.changes.create dns.changes.get dns.managedZones.create dns.managedZones.delete dns.managedZones.get dns.managedZones.list dns.networks.bindPrivateDNSZone dns.resourceRecordSets.create dns.resourceRecordSets.delete dns.resourceRecordSets.list If you did not set the credentialsMode parameter in the install-config.yaml configuration file to Manual , modify the value as shown: Sample configuration file snippet apiVersion: v1 baseDomain: example.com credentialsMode: Manual # ... If you have not previously created installation manifest files, do so by running the following command: USD openshift-install create manifests --dir <installation_directory> where <installation_directory> is the directory in which the installation program creates files. Copy the manifests that the ccoctl utility generated to the manifests directory that the installation program created by running the following command: USD cp /<path_to_ccoctl_output_dir>/manifests/* ./manifests/ Copy the tls directory that contains the private key to the installation directory: USD cp -a /<path_to_ccoctl_output_dir>/tls . 9.10. Deploying the cluster You can install OpenShift Container Platform on a compatible cloud platform. Important You can run the create cluster command of the installation program only once, during initial installation. Prerequisites You have configured an account with the cloud platform that hosts your cluster. You have the OpenShift Container Platform installation program and the pull secret for your cluster. You have verified that the cloud provider account on your host has the correct permissions to deploy the cluster. An account with incorrect permissions causes the installation process to fail with an error message that displays the missing permissions. Procedure Remove any existing GCP credentials that do not use the service account key for the GCP account that you configured for your cluster and that are stored in the following locations: The GOOGLE_CREDENTIALS , GOOGLE_CLOUD_KEYFILE_JSON , or GCLOUD_KEYFILE_JSON environment variables The ~/.gcp/osServiceAccount.json file The gcloud cli default credentials Change to the directory that contains the installation program and initialize the cluster deployment: USD ./openshift-install create cluster --dir <installation_directory> \ 1 --log-level=info 2 1 For <installation_directory> , specify the location of your customized ./install-config.yaml file. 2 To view different installation details, specify warn , debug , or error instead of info . Optional: You can reduce the number of permissions for the service account that you used to install the cluster. If you assigned the Owner role to your service account, you can remove that role and replace it with the Viewer role. If you included the Service Account Key Admin role, you can remove it. Verification When the cluster deployment completes successfully: The terminal displays directions for accessing your cluster, including a link to the web console and credentials for the kubeadmin user. Credential information also outputs to <installation_directory>/.openshift_install.log . Important Do not delete the installation program or the files that the installation program creates. Both are required to delete the cluster. Example output ... INFO Install complete! 
INFO To access the cluster as the system:admin user when using 'oc', run 'export KUBECONFIG=/home/myuser/install_dir/auth/kubeconfig' INFO Access the OpenShift web-console here: https://console-openshift-console.apps.mycluster.example.com INFO Login to the console with user: "kubeadmin", and password: "password" INFO Time elapsed: 36m22s Important The Ignition config files that the installation program generates contain certificates that expire after 24 hours, which are then renewed at that time. If the cluster is shut down before renewing the certificates and the cluster is later restarted after the 24 hours have elapsed, the cluster automatically recovers the expired certificates. The exception is that you must manually approve the pending node-bootstrapper certificate signing requests (CSRs) to recover kubelet certificates. See the documentation for Recovering from expired control plane certificates for more information. It is recommended that you use Ignition config files within 12 hours after they are generated because the 24-hour certificate rotates from 16 to 22 hours after the cluster is installed. By using the Ignition config files within 12 hours, you can avoid installation failure if the certificate update runs during installation. 9.11. Logging in to the cluster by using the CLI You can log in to your cluster as a default system user by exporting the cluster kubeconfig file. The kubeconfig file contains information about the cluster that is used by the CLI to connect a client to the correct cluster and API server. The file is specific to a cluster and is created during OpenShift Container Platform installation. Prerequisites You deployed an OpenShift Container Platform cluster. You installed the oc CLI. Procedure Export the kubeadmin credentials: USD export KUBECONFIG=<installation_directory>/auth/kubeconfig 1 1 For <installation_directory> , specify the path to the directory that you stored the installation files in. Verify you can run oc commands successfully using the exported configuration: USD oc whoami Example output system:admin Additional resources See Accessing the web console for more details about accessing and understanding the OpenShift Container Platform web console. 9.12. Telemetry access for OpenShift Container Platform In OpenShift Container Platform 4.17, the Telemetry service, which runs by default to provide metrics about cluster health and the success of updates, requires internet access. If your cluster is connected to the internet, Telemetry runs automatically, and your cluster is registered to OpenShift Cluster Manager . After you confirm that your OpenShift Cluster Manager inventory is correct, either maintained automatically by Telemetry or manually by using OpenShift Cluster Manager, use subscription watch to track your OpenShift Container Platform subscriptions at the account or multi-cluster level. Additional resources See About remote health monitoring for more information about the Telemetry service 9.13. steps Customize your cluster . If necessary, you can opt out of remote health reporting .
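As an optional post-installation sanity check, which is not part of the documented procedure, you can confirm that all nodes are ready and that the cluster Operators have finished rolling out by using only standard oc subcommands:

$ oc get nodes
$ oc get clusteroperators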
[ "ssh-keygen -t ed25519 -N '' -f <path>/<file_name> 1", "cat <path>/<file_name>.pub", "cat ~/.ssh/id_ed25519.pub", "eval \"USD(ssh-agent -s)\"", "Agent pid 31874", "ssh-add <path>/<file_name> 1", "Identity added: /home/<you>/<path>/<file_name> (<computer_name>)", "tar -xvf openshift-install-linux.tar.gz", "mkdir <installation_directory>", "compute: - architecture: amd64 hyperthreading: Enabled name: worker platform: gcp: type: custom-6-20480 replicas: 2 controlPlane: architecture: amd64 hyperthreading: Enabled name: master platform: gcp: type: custom-6-20480 replicas: 3", "controlPlane: platform: gcp: secureBoot: Enabled", "compute: - platform: gcp: secureBoot: Enabled", "platform: gcp: defaultMachinePlatform: secureBoot: Enabled", "controlPlane: platform: gcp: confidentialCompute: Enabled 1 type: n2d-standard-8 2 onHostMaintenance: Terminate 3", "compute: - platform: gcp: confidentialCompute: Enabled type: n2d-standard-8 onHostMaintenance: Terminate", "platform: gcp: defaultMachinePlatform: confidentialCompute: Enabled type: n2d-standard-8 onHostMaintenance: Terminate", "apiVersion: v1 baseDomain: example.com 1 credentialsMode: Mint 2 controlPlane: 3 4 hyperthreading: Enabled 5 name: master platform: gcp: type: n2-standard-4 zones: - us-central1-a - us-central1-c osDisk: diskType: pd-ssd diskSizeGB: 1024 encryptionKey: 6 kmsKey: name: worker-key keyRing: test-machine-keys location: global projectID: project-id tags: 7 - control-plane-tag1 - control-plane-tag2 osImage: 8 project: example-project-name name: example-image-name replicas: 3 compute: 9 10 - hyperthreading: Enabled 11 name: worker platform: gcp: type: n2-standard-4 zones: - us-central1-a - us-central1-c osDisk: diskType: pd-standard diskSizeGB: 128 encryptionKey: 12 kmsKey: name: worker-key keyRing: test-machine-keys location: global projectID: project-id tags: 13 - compute-tag1 - compute-tag2 osImage: 14 project: example-project-name name: example-image-name replicas: 3 metadata: name: test-cluster 15 networking: clusterNetwork: - cidr: 10.128.0.0/14 hostPrefix: 23 machineNetwork: - cidr: 10.0.0.0/16 networkType: OVNKubernetes 16 serviceNetwork: - 172.30.0.0/16 platform: gcp: projectID: openshift-production 17 region: us-central1 18 defaultMachinePlatform: tags: 19 - global-tag1 - global-tag2 osImage: 20 project: example-project-name name: example-image-name network: existing_vpc 21 controlPlaneSubnet: control_plane_subnet 22 computeSubnet: compute_subnet 23 pullSecret: '{\"auths\": ...}' 24 fips: false 25 sshKey: ssh-ed25519 AAAA... 
26 publish: Internal 27", "./openshift-install create manifests --dir <installation_directory> 1", "touch <installation_directory>/manifests/cluster-ingress-default-ingresscontroller.yaml 1", "ls <installation_directory>/manifests/cluster-ingress-default-ingresscontroller.yaml", "cluster-ingress-default-ingresscontroller.yaml", "apiVersion: operator.openshift.io/v1 kind: IngressController metadata: name: default namespace: openshift-ingress-operator spec: endpointPublishingStrategy: loadBalancer: providerParameters: gcp: clientAccess: Global 1 type: GCP scope: Internal 2 type: LoadBalancerService", "apiVersion: v1 baseDomain: my.domain.com proxy: httpProxy: http://<username>:<pswd>@<ip>:<port> 1 httpsProxy: https://<username>:<pswd>@<ip>:<port> 2 noProxy: example.com 3 additionalTrustBundle: | 4 -----BEGIN CERTIFICATE----- <MY_TRUSTED_CA_CERT> -----END CERTIFICATE----- additionalTrustBundlePolicy: <policy_to_add_additionalTrustBundle> 5", "./openshift-install wait-for install-complete --log-level debug", "tar xvf <file>", "echo USDPATH", "oc <command>", "C:\\> path", "C:\\> oc <command>", "echo USDPATH", "oc <command>", "apiVersion: v1 baseDomain: example.com credentialsMode: Manual", "openshift-install create manifests --dir <installation_directory>", "RELEASE_IMAGE=USD(./openshift-install version | awk '/release image/ {print USD3}')", "oc adm release extract --from=USDRELEASE_IMAGE --credentials-requests --included \\ 1 --install-config=<path_to_directory_with_installation_configuration>/install-config.yaml \\ 2 --to=<path_to_directory_for_credentials_requests> 3", "apiVersion: cloudcredential.openshift.io/v1 kind: CredentialsRequest metadata: name: <component_credentials_request> namespace: openshift-cloud-credential-operator spec: providerSpec: apiVersion: cloudcredential.openshift.io/v1 kind: GCPProviderSpec predefinedRoles: - roles/storage.admin - roles/iam.serviceAccountUser skipServiceCheck: true", "apiVersion: cloudcredential.openshift.io/v1 kind: CredentialsRequest metadata: name: <component_credentials_request> namespace: openshift-cloud-credential-operator spec: providerSpec: apiVersion: cloudcredential.openshift.io/v1 secretRef: name: <component_secret> namespace: <component_namespace>", "apiVersion: v1 kind: Secret metadata: name: <component_secret> namespace: <component_namespace> data: service_account.json: <base64_encoded_gcp_service_account_file>", "RELEASE_IMAGE=USD(./openshift-install version | awk '/release image/ {print USD3}')", "CCO_IMAGE=USD(oc adm release info --image-for='cloud-credential-operator' USDRELEASE_IMAGE -a ~/.pull-secret)", "oc image extract USDCCO_IMAGE --file=\"/usr/bin/ccoctl.<rhel_version>\" \\ 1 -a ~/.pull-secret", "chmod 775 ccoctl.<rhel_version>", "./ccoctl.rhel9", "OpenShift credentials provisioning tool Usage: ccoctl [command] Available Commands: aws Manage credentials objects for AWS cloud azure Manage credentials objects for Azure gcp Manage credentials objects for Google cloud help Help about any command ibmcloud Manage credentials objects for {ibm-cloud-title} nutanix Manage credentials objects for Nutanix Flags: -h, --help help for ccoctl Use \"ccoctl [command] --help\" for more information about a command.", "RELEASE_IMAGE=USD(./openshift-install version | awk '/release image/ {print USD3}')", "oc adm release extract --from=USDRELEASE_IMAGE --credentials-requests --included \\ 1 --install-config=<path_to_directory_with_installation_configuration>/install-config.yaml \\ 2 --to=<path_to_directory_for_credentials_requests> 3", "ccoctl gcp 
create-all --name=<name> \\ 1 --region=<gcp_region> \\ 2 --project=<gcp_project_id> \\ 3 --credentials-requests-dir=<path_to_credentials_requests_directory> 4", "ls <path_to_ccoctl_output_dir>/manifests", "cluster-authentication-02-config.yaml openshift-cloud-controller-manager-gcp-ccm-cloud-credentials-credentials.yaml openshift-cloud-credential-operator-cloud-credential-operator-gcp-ro-creds-credentials.yaml openshift-cloud-network-config-controller-cloud-credentials-credentials.yaml openshift-cluster-api-capg-manager-bootstrap-credentials-credentials.yaml openshift-cluster-csi-drivers-gcp-pd-cloud-credentials-credentials.yaml openshift-image-registry-installer-cloud-credentials-credentials.yaml openshift-ingress-operator-cloud-credentials-credentials.yaml openshift-machine-api-gcp-cloud-credentials-credentials.yaml", "apiVersion: v1 baseDomain: example.com credentialsMode: Manual", "openshift-install create manifests --dir <installation_directory>", "cp /<path_to_ccoctl_output_dir>/manifests/* ./manifests/", "cp -a /<path_to_ccoctl_output_dir>/tls .", "./openshift-install create cluster --dir <installation_directory> \\ 1 --log-level=info 2", "INFO Install complete! INFO To access the cluster as the system:admin user when using 'oc', run 'export KUBECONFIG=/home/myuser/install_dir/auth/kubeconfig' INFO Access the OpenShift web-console here: https://console-openshift-console.apps.mycluster.example.com INFO Login to the console with user: \"kubeadmin\", and password: \"password\" INFO Time elapsed: 36m22s", "export KUBECONFIG=<installation_directory>/auth/kubeconfig 1", "oc whoami", "system:admin" ]
https://docs.redhat.com/en/documentation/openshift_container_platform/4.17/html/installing_on_gcp/installing-gcp-private
Cache Encoding and Marshalling
Cache Encoding and Marshalling Red Hat Data Grid 8.4 Encode Data Grid caches and marshall Java objects Red Hat Customer Content Services
null
https://docs.redhat.com/en/documentation/red_hat_data_grid/8.4/html/cache_encoding_and_marshalling/index
Node APIs
Node APIs OpenShift Container Platform 4.13 Reference guide for node APIs Red Hat OpenShift Documentation Team
null
https://docs.redhat.com/en/documentation/openshift_container_platform/4.13/html-single/node_apis/index
Preface
Preface Red Hat OpenShift Data Foundation supports deployment on existing Red Hat OpenShift Container Platform (RHOCP) bare metal clusters in connected or disconnected environments along with out-of-the-box support for proxy environments. Both internal and external OpenShift Data Foundation clusters are supported on bare metal. See Planning your deployment and Preparing to deploy OpenShift Data Foundation for more information about deployment requirements. To deploy OpenShift Data Foundation, follow the appropriate deployment process based on your requirement: Internal mode Deploy using local storage devices Deploy standalone Multicloud Object Gateway component External mode
null
https://docs.redhat.com/en/documentation/red_hat_openshift_data_foundation/4.14/html/deploying_openshift_data_foundation_using_bare_metal_infrastructure/preface-baremetal
1.2. Installing the Ruby Software Development Kit
1.2. Installing the Ruby Software Development Kit Enable the required repositories: Install the Ruby Software Development Kit: Alternatively, you can install with gem :
[ "subscription-manager repos --enable=rhel-8-for-x86_64-baseos-rpms --enable=rhel-8-for-x86_64-appstream-rpms --enable=rhv-4.4-manager-for-rhel-8-x86_64-rpms", "dnf install rubygem-ovirt-engine-sdk4", "gem install ovirt-engine-sdk" ]
https://docs.redhat.com/en/documentation/red_hat_virtualization/4.4/html/ruby_sdk_guide/installing_the_ruby_sdk
Providing feedback on Red Hat documentation
Providing feedback on Red Hat documentation We appreciate your input on our documentation. Tell us how we can make it better. Providing documentation feedback in Jira Use the Create Issue form to provide feedback on the documentation. The Jira issue will be created in the Red Hat OpenStack Platform Jira project, where you can track the progress of your feedback. Ensure that you are logged in to Jira. If you do not have a Jira account, create an account to submit feedback. Click the following link to open the Create Issue page: Create Issue Complete the Summary and Description fields. In the Description field, include the documentation URL, chapter or section number, and a detailed description of the issue. Do not modify any other fields in the form. Click Create .
null
https://docs.redhat.com/en/documentation/red_hat_openstack_platform/16.2/html/block_storage_backup_guide/proc_providing-feedback-on-red-hat-documentation
24.2.4. Directories
24.2.4. Directories Use the Directories page in the Performance tab to configure options for specific directories. This corresponds to the <Directory> directive. Figure 24.6. Directories Click the Edit button in the top right-hand corner to configure the Default Directory Options for all directories that are not specified in the Directory list below it. The options that you choose are listed as the Options directive within the <Directory> directive. You can configure the following options: ExecCGI - Allow execution of CGI scripts. CGI scripts are not executed if this option is not chosen. FollowSymLinks - Allow symbolic links to be followed. Includes - Allow server-side includes. IncludesNOEXEC - Allow server-side includes, but disable the #exec and #include commands in CGI scripts. Indexes - Display a formatted list of the directory's contents, if no DirectoryIndex (such as index.html ) exists in the requested directory. Multiview - Support content-negotiated multiviews; this option is disabled by default. SymLinksIfOwnerMatch - Only follow symbolic links if the target file or directory has the same owner as the link. To specify options for specific directories, click the Add button beside the Directory list box. A window as shown in Figure 24.7, "Directory Settings" appears. Enter the directory to configure in the Directory text field at the bottom of the window. Select the options in the right-hand list and configure the Order directive with the left-hand side options. The Order directive controls the order in which allow and deny directives are evaluated. In the Allow hosts from and Deny hosts from text field, you can specify one of the following: Allow all hosts - Type all to allow access to all hosts. Partial domain name - Allow all hosts whose names match or end with the specified string. Full IP address - Allow access to a specific IP address. A subnet - Such as 192.168.1.0/255.255.255.0 A network CIDR specification - such as 10.3.0.0/16 Figure 24.7. Directory Settings If you check the Let .htaccess files override directory options , the configuration directives in the .htaccess file take precedence.
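For reference, the settings chosen in this window are written out as standard Apache directives. The following sketch shows roughly what an equivalent <Directory> block looks like when appended to httpd.conf; the directory path, network range, and configuration file location (/etc/httpd/conf/httpd.conf on Red Hat Enterprise Linux) are examples only, and the exact directives that the tool generates may differ:

$ cat >> /etc/httpd/conf/httpd.conf <<'EOF'
<Directory "/var/www/html/downloads">
    Options Indexes FollowSymLinks
    Order allow,deny
    Allow from 192.168.1.0/255.255.255.0
    AllowOverride Options
</Directory>
EOF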
null
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/4/html/system_administration_guide/default_settings-directories
Chapter 28. Hit policies for guided decision tables
Chapter 28. Hit policies for guided decision tables Hit policies determine the order in which rules (rows) in a guided decision table are applied, whether top to bottom, per specified priority, or other options. The following hit policies are available: None: (Default hit policy) Multiple rows can be executed and the verification warns about rows that conflict. Any decision tables that have been uploaded (using a non-guided decision table spreadsheet) will adopt this hit policy. Resolved Hit: Only one row at a time can be executed according to specified priority, regardless of list order (you can give row 10 priority over row 5, for example). This means you can keep the order of the rows you want for visual readability, but specify priority exceptions. Unique Hit: Only one row at a time can be executed, and each row must be unique, with no overlap of conditions being met. If more than one row is executed, then the verification produces a warning at development time. First Hit: Only one row at a time can be executed in the order listed in the table, top to bottom. Rule Order: Multiple rows can be executed and verification does not report conflicts between the rows since they are expected to happen. Figure 28.1. Available hit policies 28.1. Hit policy examples: Decision table for discounts on movie tickets The following is part of an example decision table for discounts on movie tickets based on customer age, student status, or military status, or all three. Table 28.1. Example decision table for available discounts on movie tickets Row Number Discount Type Discount 1 Senior citizen (age 60+) 10% 2 Student 10% 3 Military 10% In this example, the total discount to be applied in the end will vary depending on the hit policy specified for the table: None/Rule Order: With both None and Rule Order hit policies, all applicable rules are incorporated, in this case allowing discounts to be stacked for each customer. Example: A senior citizen who is also a student and a military veteran will receive all three discounts, totaling 30%. Key difference: With None , warnings are created for multiple rows applied. With Rule Order , those warnings are not created. First Hit/Resolved Hit: With both First Hit and Resolved Hit policies, only one of the discounts can be applied. For First Hit , the discount that is satisfied first in the list is applied and the others are ignored. Example: A senior citizen who is also a student and a military veteran will receive only the senior citizen discount of 10%, since that is listed first in the table. For Resolved Hit , a modified table is required. The discount that you assign a priority exception to in the table, regardless of listed order, will be applied first. To assign this exception, include a new column that specifies the priority of one discount (row) over others. Example: If military discounts are prioritized higher than age or student discounts, despite the listed order, then a senior citizen who is also a student and a military veteran will receive only the military discount of 10%, regardless of age or student status. Consider the following modified decision table that accommodates a Resolved Hit policy: Table 28.2. 
Modified decision table that accommodates a Resolved Hit policy Row Number Discount Type Has Priority over Row Discount 1 Senior citizen (age 60+) 10% 2 Student 10% 3 Military 1 10% In this modified table, the military discount is essentially the new row 1 and therefore takes priority over both age and student discounts, and any other discounts added later. You do not need to specify priority over rows "1 and 2", only over row "1". This changes the row hit order to 3 1 2 ... and so on as the table grows. Note The row order would be changed in the same way if you actually moved the military discount to row 1 and applied a First Hit policy to the table instead. However, if you want the rules listed in a certain way and applied differently, such as in an alphabetized table, the Resolved Hit policy is useful. Key difference: With First Hit , rules are applied strictly in the listed order. With Resolved Hit , rules are applied in the listed order unless priority exceptions are specified. Unique Hit: A modified table is required. With a Unique Hit policy, rows must be created in a way that it is impossible to satisfy multiple rules at one time. However, you can still specify row-by-row whether to apply one rule or multiple. In this way, with a Unique Hit policy you can make decision tables more granular and prevent overlap warnings. Consider the following modified decision table that accommodates a Unique Hit policy: Table 28.3. Modified decision table that accommodates a Unique Hit policy Row Number Is Senior Citizen (age 65+) Is Student Is Military Discount 1 yes no no 10% 2 no yes no 10% 3 no no yes 10% 4 yes yes no 20% 5 yes no yes 20% 6 no yes yes 20% 7 yes yes yes 30% In this modified table, each row is unique, with no allowance of overlap, and any single discount or any combination of discounts is accommodated. 28.1.1. Types of guided decision tables Two types of decision tables are supported in Red Hat Process Automation Manager: Extended entry and Limited entry tables. Extended entry: An Extended Entry decision table is one for which the column definitions specify Pattern, Field, and Operator but not value. The values, or states, are themselves held in the body of the decision table. Limited entry: A Limited Entry decision table is one for which the column definitions specify value in addition to Pattern, Field, and Operator. The decision table states, held in the body of the table, are boolean where a positive value (a marked check box) has the effect of meaning the column should apply, or be matched. A negative value (a cleared check box) means the column does not apply.
null
https://docs.redhat.com/en/documentation/red_hat_process_automation_manager/7.13/html/developing_decision_services_in_red_hat_process_automation_manager/hit-policies-con
Chapter 7. Hiding header output from Hammer commands
Chapter 7. Hiding header output from Hammer commands When you use any hammer command, you have the option of hiding the headers from the output. Hiding the header output is useful if you want to pipe the output or use it in custom scripts. To hide the header output, add the --no-headers option to any hammer command.
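For example, to count the number of organizations without the header row being included in the total (a simple illustration; any hammer list subcommand works the same way):

$ hammer --no-headers organization list | wc -l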
null
https://docs.redhat.com/en/documentation/red_hat_satellite/6.16/html/using_the_hammer_cli_tool/hiding-header-output-from-hammer-commands
Chapter 2. Migrating the ML2 mechanism driver from OVS to OVN
Chapter 2. Migrating the ML2 mechanism driver from OVS to OVN 2.1. Preparing the environment for migration of the ML2 mechanism driver from OVS to OVN Environment assessment and preparation is critical to a successful migration. Your Red Hat Technical Account Manager or Global Professional Services will guide you through these steps. Prerequisites Your pre-migration deployment is Red Hat OpenStack Platform (RHOSP) 16.2 or later. Your RHOSP deployment is up to date. In other words, if you need to upgrade or update your OpenStack version, perform the upgrade or update first, and then perform the ML2/OVS to ML2/OVN migration. At least one IP address is available for each subnet pool. The OVN mechanism driver creates a metadata port for each subnet. Each metadata port claims an IP address from the IP address pool. You have worked with your Red Hat Technical Account Manager or Global Professional Services to plan the migration and have filed a proactive support case. See How to submit a Proactive Case . Procedure Create an ML2/OVN stage deployment to obtain the baseline configuration of your target ML2/OVN deployment and test the feasibility of the target deployment. Design the stage deployment with the same basic roles, routing, and topology as the planned post-migration production deployment. Save the overcloud-deploy.sh file and any files referenced by the deployment, such as environment files. You need these files later in this procedure to configure the migration target environment. Note Use these files only for creation of the stage deployment and in the migration. Do not re-use them after the migration. If your ML2/OVS deployment uses VXLAN or GRE project networks, schedule for a waiting period of up to 24 hours after the setup-mtu-t1 step. This waiting period allows the VM instances to renew their DHCP leases and receive the new MTU value. During this time you might need to manually set MTUs on some instances and reboot some instances. 24 hours is the time based on default configuration of 86400 seconds. The actual time depends on /var/lib/config-data/puppet-generated/neutron/etc/neutron/dhcp_agent.ini dhcp_renewal_time and /var/lib/config-data/puppet-generated/neutron/etc/neutron/neutron.conf dhcp_lease_duration parameters. Install python3-networking-ovn-migration-tool. The @container-tools argument also installs the container tools if they are not already present. Create a directory on the undercloud, and copy the Ansible playbooks: Copy your ML2/OVN stage deployment files to the migration home directory, such as ~/ovn_migration . The stage migration deployment files include overcloud-deploy.sh and any files referenced by the deployment, such as environment files. Rename the copy of overcloud-deploy.sh to overcloud-deploy-ovn.sh . Use this script for migration only. Do not use it for other purposes. Find your migration scenario in the following list and perform the appropriate steps to customize the openstack deploy command in overcloud-deploy-ovn.sh . Scenario 1: DVR to DVR, compute nodes have connectivity to the external network Add the following environment files to the openstack deploy command in overcloud-deploy-ovn.sh. Add them in the order shown. This command example uses the default neutron-ovn-dvr-ha.yaml file. If you use a different file, replace the file name in the command. 
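As a hedged illustration of Scenario 1, the deploy command in overcloud-deploy-ovn.sh might end up looking like the following sketch. Only the last two -e lines are the migration-specific additions; the template path, stack name, and other environment files are placeholders that should match whatever your existing deployment script already uses.

```bash
#!/bin/bash
# Hypothetical excerpt of overcloud-deploy-ovn.sh for Scenario 1 (DVR to DVR).
openstack overcloud deploy \
  --templates /usr/share/openstack-tripleo-heat-templates \
  --stack overcloud \
  -e /home/stack/templates/node-info.yaml \
  -e /home/stack/containers-prepare-parameter.yaml \
  -e /usr/share/openstack-tripleo-heat-templates/environments/services/neutron-ovn-dvr-ha.yaml \
  -e "$HOME/ovn-extras.yaml"
```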
Scenario 2: Centralized routing to centralized routing (no DVR) If your deployment uses SR-IOV, add the service definition OS::TripleO::Services::OVNMetadataAgent to the Controller role in the file roles_data.yaml . Preserve the pre-migration custom bridge mappings. Run this command on a networker or combined networker/controller node to get the current bridge mappings: Example output On the undercloud, create an environment file for the bridge mappings: /home/stack/neutron_bridge_mappings.yaml . Set the defaults in the environment file. For example: Add the following environment files to the openstack deploy command in overcloud-deploy-ovn.sh. Add them in the order shown. If your environment does not use SR-IOV, omit the neutron-ovn-sriov.yaml file. The file ovn-extras.yaml does not exist yet but it is created by the script ovn_migration.sh before the openstack deploy command is run. Leave any custom network modifications the same as they were before migration. Scenario 3: Centralized routing to DVR, with Geneve type driver, and compute nodes connected to external networks through br-ex Warning If your ML2/OVS deployment uses centralized routing and VLAN project (tenant) networks, do not migrate to ML2/OVN with DVR. You can migrate to ML2/OVN with centralized routing. To track progress on this limitation, see https://bugzilla.redhat.com/show_bug.cgi?id=1766930 . Ensure that compute nodes are connected to the external network through the br-ex bridge. For example, in an environment file such as compute-dvr.yaml, set the following: Ensure that all users have execution privileges on the file overcloud-deploy-ovn.sh . The script requires execution privileges during the migration process. Use export commands to set the following migration-related environment variables. For example: STACKRC_FILE - the stackrc file in your undercloud. Default: ~/stackrc OVERCLOUDRC_FILE - the overcloudrc file in your undercloud. Default: ~/overcloudrc OVERCLOUD_OVN_DEPLOY_SCRIPT - the deployment script. Default: ~/overcloud-deploy-ovn.sh PUBLIC_NETWORK_NAME - the name of your public network. Default: public . IMAGE_NAME - the name or ID of the glance image to use to boot a test server. Default: cirros . The image is automatically downloaded during the pre-validation / post-validation process. VALIDATE_MIGRATION - Create migration resources to validate the migration. Before starting the migration, the migration script boots a server and validates that the server is reachable after the migration. Default: True. Warning Migration validation requires at least two available floating IP addresses, two networks, two subnets, two instances, and two routers as admin. Also, the network specified by PUBLIC_NETWORK_NAME must have available floating IP addresses, and you must be able to ping them from the undercloud. If your environment does not meet these requirements, set VALIDATE_MIGRATION to False. SERVER_USER_NAME - User name to use for logging to the migration instances. Default: cirros . DHCP_RENEWAL_TIME - DHCP renewal time in seconds to configure in DHCP agent configuration file. Default: 30 Ensure you are in the ovn-migration directory and run the command ovn_migration.sh generate-inventory to generate the inventory file hosts_for_migration and the ansible.cfg file. Review the hosts_for_migration file for accuracy. Ensure the lists match your environment. Ensure there are ovn controllers on each node. Ensure there are no list headings (such as [ovn-controllers]) that do not have list items under them. 
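Putting the export step and the inventory step together, a session on the undercloud might look like the following sketch. All of the values shown are the documented defaults or examples; replace them with the names used in your environment.

```bash
# Example values only -- substitute names that match your environment.
export STACKRC_FILE=~/stackrc
export OVERCLOUDRC_FILE=~/overcloudrc
export OVERCLOUD_OVN_DEPLOY_SCRIPT=~/overcloud-deploy-ovn.sh
export PUBLIC_NETWORK_NAME=public
export IMAGE_NAME=cirros
export VALIDATE_MIGRATION=True
export SERVER_USER_NAME=cirros
export DHCP_RENEWAL_TIME=30

# Generate the Ansible inventory from the migration directory, then review it.
cd ~/ovn_migration
ovn_migration.sh generate-inventory | sudo tee -a /var/log/ovn_migration_output.txt
less hosts_for_migration
```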
From the ovn migration directory, run the command ansible -i hosts_for_migration -m ping all If your original deployment uses VXLAN or GRE, you need to adjust maximum transmission unit (MTU) values. Proceed to Adjusting MTU for migration from the OVS mechanism driver to the OVN mechanism driver . If your original deployment uses VLAN networks, you can skip the MTU adjustments and proceed to Preparing container images for migration from the OVS mechanism driver to the OVN mechanism driver . 2.2. Adjusting MTU for migration of the ML2 mechanism driver from OVS to OVN If you are migrating from RHOSP 17.0 with the OVS mechanism driver with VXLAN or GRE to the OVN mechanism driver with Geneve, you must ensure that the maximum transmission unit (MTU) settings are smaller than or equal to the smallest MTU in the network. If your current deployment uses VLAN instead of VXLAN or GRE, skip this procedure and proceed to Preparing container images for migration from the OVS mechanism driver to the OVN mechanism driver . Prerequisites You have completed the steps in Preparing the environment for migration from the OVS mechanism driver to the OVN mechanism driver . Your pre-migration deployment is Red Hat OpenStack Platform (RHOSP) 16.2 or later with VXLAN or GRE. Procedure Run ovn_migration.sh setup-mtu-t1 . This lowers the T1 parameter of the internal neutron DHCP servers that configure the dhcp_renewal_time in /var/lib/config-data/puppet-generated/neutron/etc/neutron/dhcp_agent.ini on all the nodes where the DHCP agent is running. If your original OVS deployment uses VXLAN or GRE project networking, wait until the DHCP leases have been renewed on all VM instances. This can take up to 24 hours depending on lease renewal settings and the number of instances. Verify that the T1 parameter has propagated to existing VMs. Connect to one of the compute nodes. Run tcpdump over one of the VM taps attached to a project network. If T1 propagation is successful, expect to see requests occur approximately every 30 seconds: Note This verification is not possible with cirros VMs. The cirros udhcpc implementation does not respond to DHCP option 58 (T1). Try this verification on a port that belongs to a full Linux VM. Red Hat recommends that you check all the different operating systems represented in your workloads, such as variants of Windows and Linux distributions. If any VM instances were not updated to reflect the change to the T1 parameter of DHCP, reboot them. Lower the MTU of the pre-migration VXLAN and GRE networks: This step reduces the MTU network by network and tags the completed network with adapted_mtu. The tool acts only on VXLAN and GRE networks. This step will not change any values if your deployment has only VLAN project networks. If you have any instances with static IP assignment on VXLAN or GRE project networks, manually modify the configuration of those instances to configure the new Geneve MTU, which is the current VXLAN MTU minus 8 bytes. For example, if the VXLAN-based MTU was 1450, change it to 1442. Note Perform this step only if you have manually provided static IP assignments and MTU settings on VXLAN or GRE project networks. By default, DHCP provides the IP assignment and MTU settings. Proceed to Preparing container images for migration from the OVS mechanism driver to the OVN mechanism driver . 2.3. Preparing container images for migration of the ML2 mechanism driver from OVS to OVN Environment assessment and preparation is critical to a successful migration.
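For the static IP case described in the MTU procedure above, the adjustment inside a guest might look like the following sketch. The interface name and configuration file path are assumptions about a RHEL-style guest; the only fixed rule is the 8-byte reduction (for example, VXLAN MTU 1450 becomes Geneve MTU 1442).

```bash
# Inside a guest that has a static IP on a VXLAN or GRE project network.
# eth0 and the ifcfg path are examples; only the 8-byte reduction is fixed.
sudo ip link set dev eth0 mtu 1442                 # takes effect immediately

# Persist the value across reboots on a RHEL-style guest:
echo 'MTU=1442' | sudo tee -a /etc/sysconfig/network-scripts/ifcfg-eth0
```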
Your Red Hat Technical Account Manager or Global Professional Services will guide you through these steps. Prerequisites You have completed the steps in Preparing the environment for migration of the ML2 mechanism driver from OVS to OVN If your original deployment uses VXLAN or GRE, you also completed the steps in Adjusting MTU for migration from the OVS mechanism driver to the OVN mechanism driver . Procedure Prepare the new container images for use after the migration to ML2/OVN. Create containers-prepare-parameter.yaml file in the home directory if it is not present. Verify that containers-prepare-parameter.yaml is present at the end of your USDHOME/overcloud-deploy-ovn.sh and USDHOME/overcloud-deploy.sh files. Change the neutron_driver in the containers-prepare-parameter.yaml file to ovn: Verify the changes to the neutron_driver: Update the images: Note Provide the full path to your containers-prepare-parameter.yaml file. Otherwise, the command completes very quickly without updating the image list or providing an error message. On the undercloud, validate the updated images. Your list should resemble the following example. It includes containers for the OVN databases, OVN controller, the metadata agent, and the neutron server agent. Proceed to Migrating from ML2/OVS to ML2/OVN . 2.4. Migrating the ML2 mechanism driver from OVS to OVN The ovn-migration script performs environmental setup, migration, and cleanup tasks related to the in-place migration of the ML2 mechanism driver from OVS to OVN. Prerequisites You have completed the steps in Preparing the environment for migration of the ML2 mechanism driver from OVS to OVN If your original deployment uses VXLAN or GRE, you also completed the steps in Adjusting MTU for migration from the OVS mechanism driver to the OVN mechanism driver . You also completed all required migration steps through Preparing container images for migration from the OVS mechanism driver to the OVN mechanism driver . Procedure Stop all operations that interact with the Networking Service (neutron) API, such as creating new networks, subnets, or routers, or migrating virtual machine instances between compute nodes. Interaction with Networking API during migration can cause undefined behavior. You can restart the API operations after completing the migration. Run ovn_migration.sh start-migration to begin the migration process. The tee command creates a copy of the script output for troubleshooting purposes. Result The script performs the following actions. Creates pre-migration resources (network and VM) to validate existing deployment and final migration. Updates the overcloud stack to deploy OVN alongside reference implementation services using the temporary bridge br-migration instead of br-int. The temporary bridge helps to limit downtime during migration. Generates the OVN northbound database by running neutron-ovn-db-sync-util. The utility examines the Neutron database to create equivalent resources in the OVN northbound database. Clones the existing resources from br-int to br-migration, to allow ovn to find the same resource UUIDS over br-migration. Re-assigns ovn-controller to br-int instead of br-migration. Removes node resources that are not used by ML2/OVN, including the following. Cleans up network namespaces (fip, snat, qrouter, qdhcp). Removes any unnecessary patch ports on br-int . Removes br-tun and br-migration ovs bridges. Deletes ports from br-int that begin with qr- , ha- , and qg- (using neutron-netns-cleanup). 
Deletes Networking Service (neutron) agents and Networking Service HA internal networks from the database through the Networking Service API. Validates connectivity on pre-migration resources. Deletes pre-migration resources. Creates post-migration resources. Validates connectivity on post-migration resources. Cleans up post-migration resources. Re-runs the deployment tool to update OVN on br-int .
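The following spot checks are a hypothetical way to confirm the result of those cleanup actions; they are not part of the official procedure. The bridge and agent names come from the steps above, and the commands assume you can reach an overcloud node and the undercloud.

```bash
# Run on a controller or compute node after ovn_migration.sh completes.
# br-tun and br-migration should be gone; br-int should remain.
sudo ovs-vsctl list-br

# ovn-controller should now be attached to br-int rather than br-migration.
sudo ovs-vsctl get Open_vSwitch . external_ids:ovn-bridge

# From the undercloud, confirm that only OVN agents are registered in neutron.
source ~/overcloudrc
openstack network agent list
```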
[ "sudo dnf install python3-networking-ovn-migration-tool @container-tools", "mkdir ~/ovn_migration cd ~/ovn_migration cp -rfp /usr/share/ansible/neutron-ovn-migration/playbooks .", "-e /usr/share/openstack-tripleo-heat-templates/environments/services/neutron-ovn-dvr-ha.yaml -e USDHOME/ovn-extras.yaml", "sudo podman exec -it neutron_ovs_agent crudini --get /etc/neutron/plugins/ml2/openvswitch_agent.ini ovs bridge_mappings", "datacentre:br-ex,tenant:br-isolated", "parameter_defaults: ComputeParameters: NeutronBridgeMappings: \"datacentre:br-ex,tenant:br-isolated\"", "-e /usr/share/openstack-tripleo-heat-templates/environments/services/neutron-ovn-ha.yaml -e /usr/share/openstack-tripleo-heat-templates/environments/services/neutron-ovn-sriov.yaml -e /home/stack/ovn-extras.yaml -e /home/stack/neutron_bridge_mappings.yaml", "type: ovs_bridge # Defaults to br-ex, anything else requires specific # bridge mapping entries for it to be used. name: bridge_name use_dhcp: false members: - type: interface name: nic3 # force the MAC address of the bridge to this interface primary: true", "chmod a+x ~/overcloud-deploy-ovn.sh", "export PUBLIC_NETWORK_NAME=my-public-network", "ovn_migration.sh generate-inventory | sudo tee -a /var/log/ovn_migration_output.txt", "ovn_migration.sh setup-mtu-t1 | sudo tee -a /var/log/ovn_migration_output.txt", "[heat-admin@overcloud-novacompute-0 ~]USD sudo tcpdump -i tap52e872c2-e6 port 67 or port 68 -n tcpdump: verbose output suppressed, use -v or -vv for full protocol decode listening on tap52e872c2-e6, link-type EN10MB (Ethernet), capture size 262144 bytes 13:17:28.954675 IP 192.168.99.5.bootpc > 192.168.99.3.bootps: BOOTP/DHCP, Request from fa:16:3e:6b:41:3d, length 300 13:17:28.961321 IP 192.168.99.3.bootps > 192.168.99.5.bootpc: BOOTP/DHCP, Reply, length 355 13:17:56.241156 IP 192.168.99.5.bootpc > 192.168.99.3.bootps: BOOTP/DHCP, Request from fa:16:3e:6b:41:3d, length 30013:17:56.249899 IP 192.168.99.3.bootps > 192.168.99.5.bootpc: BOOTP/DHCP, Reply, length 355", "ovn_migration.sh reduce-mtu | sudo tee -a /var/log/ovn_migration_output.txt", "test -f USDHOME/containers-prepare-parameter.yaml || sudo openstack tripleo container image prepare default --output-env-file USDHOME/containers-prepare-parameter.yaml", "sed -i -E 's/neutron_driver:([ ]\\w+)/neutron_driver: ovn/' USDHOME/containers-prepare-parameter.yaml", "grep neutron_driver USDHOME/containers-prepare-parameter.yaml neutron_driver: ovn", "sudo openstack tripleo container image prepare --environment-file /home/stack/containers-prepare-parameter.yaml", ". Log in to the undercloud as the user `stack` and source the stackrc file. source ~/stackrc openstack tripleo container image list | grep '\\-ovn'", "docker://undercloud-0.ctlplane.redhat.local:8787/rh-osbs/rhosp16-openstack-ovn-northd:16.2_20211110.2 docker://undercloud-0.ctlplane.redhat.local:8787/rh-osbs/rhosp16-openstack-ovn-sb-db-server:16.2_20211110.2 docker://undercloud-0.ctlplane.redhat.local:8787/rh-osbs/rhosp16-openstack-ovn-controller:16.2_20211110.2 docker://undercloud-0.ctlplane.redhat.local:8787/rh-osbs/rhosp16-openstack-neutron-server-ovn:16.2_20211110.2 docker://undercloud-0.ctlplane.redhat.local:8787/rh-osbs/rhosp16-openstack-ovn-nb-db-server:16.2_20211110.2 docker://undercloud-0.ctlplane.redhat.local:8787/rh-osbs/rhosp16-openstack-neutron-metadata-agent-ovn:16.2_20211110.2", "ovn_migration.sh start-migration | sudo tee -a /var/log/ovn_migration_output.txt" ]
https://docs.redhat.com/en/documentation/red_hat_openstack_platform/17.0/html/testing_migration_of_the_networking_service_to_the_ml2ovn_mechanism_driver/migrating-ovs-to-ovn
3.2. Ignoring Local Disks when Generating Multipath Devices
3.2. Ignoring Local Disks when Generating Multipath Devices Some machines have local SCSI cards for their internal disks. DM-Multipath is not recommended for these devices. The following procedure shows how to modify the multipath configuration file to ignore the local disks when configuring multipath. Determine which disks are the internal disks and mark them as the ones to blacklist. In this example, /dev/sda is the internal disk. Note that as originally configured in the default multipath configuration file, executing the multipath -v2 command shows the local disk, /dev/sda , in the multipath map. For further information on the multipath command output, see Section 5.1, "Multipath Command Output" . In order to prevent the device mapper from mapping /dev/sda in its multipath maps, edit the devnode_blacklist section of the /etc/multipath.conf file to include this device. Although you could blacklist the sda device using a devnode type, that would not be a safe procedure because /dev/sda is not guaranteed to be the same device on reboot. To blacklist individual devices, you can use the WWID of each device. Note that in the output of the multipath -v2 command, the WWID of the /dev/sda device is SIBM-ESXSST336732LC____F3ET0EP0Q000072428BX1. To blacklist this device, include the following in the /etc/multipath.conf file. Run the following commands: The local disk or disks should no longer be listed in the new multipath maps, as shown in the following example.
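As a quick hedged check, assuming the internal disk is /dev/sda as in the example, you can rebuild the maps and confirm that the device no longer appears. The grep pattern is only an illustration; check for whichever local device names apply to your system.

```bash
# Flush the existing multipath maps, rebuild them with the updated blacklist,
# and confirm that the local disk is no longer part of any map.
multipath -F
multipath -v2 | grep sda \
  && echo "WARNING: sda still appears in a multipath map" \
  || echo "sda is no longer mapped"
```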
[ "multipath -v2 create: SIBM-ESXSST336732LC____F3ET0EP0Q000072428BX1 [size=33 GB][features=\"0\"][hwhandler=\"0\"] \\_ round-robin 0 \\_ 0:0:0:0 sda 8:0 [--------- device-mapper ioctl cmd 9 failed: Invalid argument device-mapper ioctl cmd 14 failed: No such device or address create: 3600a0b80001327d80000006d43621677 [size=12 GB][features=\"0\"][hwhandler=\"0\"] \\_ round-robin 0 \\_ 2:0:0:0 sdb 8:16 \\_ 3:0:0:0 sdf 8:80 create: 3600a0b80001327510000009a436215ec [size=12 GB][features=\"0\"][hwhandler=\"0\"] \\_ round-robin 0 \\_ 2:0:0:1 sdc 8:32 \\_ 3:0:0:1 sdg 8:96 create: 3600a0b80001327d800000070436216b3 [size=12 GB][features=\"0\"][hwhandler=\"0\"] \\_ round-robin 0 \\_ 2:0:0:2 sdd 8:48 \\_ 3:0:0:2 sdh 8:112 create: 3600a0b80001327510000009b4362163e [size=12 GB][features=\"0\"][hwhandler=\"0\"] \\_ round-robin 0 \\_ 2:0:0:3 sde 8:64 \\_ 3:0:0:3 sdi 8:128", "devnode_blacklist { wwid SIBM-ESXSST336732LC____F3ET0EP0Q000072428BX1 }", "multipath -F multipath -v2", "multipath -F multipath -v2 create: 3600a0b80001327d80000006d43621677 [size=12 GB][features=\"0\"][hwhandler=\"0\"] \\_ round-robin 0 \\_ 2:0:0:0 sdb 8:16 \\_ 3:0:0:0 sdf 8:80 create: 3600a0b80001327510000009a436215ec [size=12 GB][features=\"0\"][hwhandler=\"0\"] \\_ round-robin 0 \\_ 2:0:0:1 sdc 8:32 \\_ 3:0:0:1 sdg 8:96 create: 3600a0b80001327d800000070436216b3 [size=12 GB][features=\"0\"][hwhandler=\"0\"] \\_ round-robin 0 \\_ 2:0:0:2 sdd 8:48 \\_ 3:0:0:2 sdh 8:112 create: 3600a0b80001327510000009b4362163e [size=12 GB][features=\"0\"][hwhandler=\"0\"] \\_ round-robin 0 \\_ 2:0:0:3 sde 8:64 \\_ 3:0:0:3 sdi 8:128" ]
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/4/html/dm_multipath/ignore_localdisk_procedure
Chapter 14. Updating a cluster in a disconnected environment
Chapter 14. Updating a cluster in a disconnected environment 14.1. About cluster updates in a disconnected environment A disconnected environment is one in which your cluster nodes cannot access the internet. For this reason, you must populate a registry with the installation images. If your registry host cannot access both the internet and the cluster, you can mirror the images to a file system that is disconnected from that environment and then bring that host or removable media across that gap. If the local container registry and the cluster are connected to the mirror registry's host, you can directly push the release images to the local registry. A single container image registry is sufficient to host mirrored images for several clusters in the disconnected network. 14.1.1. Mirroring the OpenShift Container Platform image repository To update your cluster in a disconnected environment, your cluster environment must have access to a mirror registry that has the necessary images and resources for your targeted update. The following page has instructions for mirroring images onto a repository in your disconnected cluster: Mirroring the OpenShift Container Platform image repository 14.1.2. Performing a cluster update in a disconnected environment You can use one of the following procedures to update a disconnected OpenShift Container Platform cluster: Updating a cluster in a disconnected environment using the OpenShift Update Service Updating a cluster in a disconnected environment without the OpenShift Update Service 14.1.3. Uninstalling the OpenShift Update Service from a cluster You can use the following procedure to uninstall a local copy of the OpenShift Update Service (OSUS) from your cluster: Uninstalling the OpenShift Update Service from a cluster 14.2. Mirroring the OpenShift Container Platform image repository You must mirror container images onto a mirror registry before you can update a cluster in a disconnected environment. You can also use this procedure in connected environments to ensure your clusters run only approved container images that have satisfied your organizational controls for external content. Note Your mirror registry must be running at all times while the cluster is running. The following steps outline the high-level workflow on how to mirror images to a mirror registry: Install the OpenShift CLI ( oc ) on all devices being used to retrieve and push release images. Download the registry pull secret and add it to your cluster. If you use the oc-mirror OpenShift CLI ( oc ) plugin : Install the oc-mirror plugin on all devices being used to retrieve and push release images. Create an image set configuration file for the plugin to use when determining which release images to mirror. You can edit this configuration file later to change which release images that the plugin mirrors. Mirror your targeted release images directly to a mirror registry, or to removable media and then to a mirror registry. Configure your cluster to use the resources generated by the oc-mirror plugin. Repeat these steps as needed to update your mirror registry. If you use the oc adm release mirror command : Set environment variables that correspond to your environment and the release images you want to mirror. Mirror your targeted release images directly to a mirror registry, or to removable media and then to a mirror registry. Repeat these steps as needed to update your mirror registry. 
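If you follow the oc-mirror workflow outlined above, the image set configuration file might look like the following sketch. The registry host name, repository paths, and version range are placeholders; setting graph: true also builds the graph data image used later by the OpenShift Update Service.

```bash
# Hypothetical image set configuration and mirror run for the oc-mirror plugin.
cat > imageset-config.yaml <<'EOF'
kind: ImageSetConfiguration
apiVersion: mirror.openshift.io/v1alpha2
storageConfig:
  registry:
    imageURL: registry.example.com:5000/mirror/oc-mirror-metadata
    skipTLS: false
mirror:
  platform:
    channels:
    - name: stable-4.13
      minVersion: 4.13.0
      maxVersion: 4.13.10
    graph: true
EOF

# Mirror directly to the target registry (fully connected mirror host):
oc mirror --config=./imageset-config.yaml docker://registry.example.com:5000/mirror
```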
Compared to using the oc adm release mirror command, the oc-mirror plugin has the following advantages: It can mirror content other than container images. After mirroring images for the first time, it is easier to update images in the registry. The oc-mirror plugin provides an automated way to mirror the release payload from Quay, and also builds the latest graph data image for the OpenShift Update Service running in the disconnected environment. 14.2.1. Mirroring resources using the oc-mirror plugin You can use the oc-mirror OpenShift CLI ( oc ) plugin to mirror images to a mirror registry in your fully or partially disconnected environments. You must run oc-mirror from a system with internet connectivity to download the required images from the official Red Hat registries. See Mirroring images for a disconnected installation using the oc-mirror plugin for additional details. 14.2.2. Mirroring images using the oc adm release mirror command You can use the oc adm release mirror command to mirror images to your mirror registry. 14.2.2.1. Prerequisites You must have a container image registry that supports Docker v2-2 in the location that will host the OpenShift Container Platform cluster, such as Red Hat Quay. Note If you use Red Hat Quay, you must use version 3.6 or later with the oc-mirror plugin. If you have an entitlement to Red Hat Quay, see the documentation on deploying Red Hat Quay for proof-of-concept purposes or by using the Quay Operator . If you need additional assistance selecting and installing a registry, contact your sales representative or Red Hat Support. If you do not have an existing solution for a container image registry, the mirror registry for Red Hat OpenShift is included in OpenShift Container Platform subscriptions. The mirror registry for Red Hat OpenShift is a small-scale container registry that you can use to mirror OpenShift Container Platform container images in disconnected installations and updates. 14.2.2.2. Preparing your mirror host Before you perform the mirror procedure, you must prepare the host to retrieve content and push it to the remote location. 14.2.2.2.1. Installing the OpenShift CLI by downloading the binary You can install the OpenShift CLI ( oc ) to interact with OpenShift Container Platform from a command-line interface. You can install oc on Linux, Windows, or macOS. Important If you installed an earlier version of oc , you cannot use it to complete all of the commands in OpenShift Container Platform 4.13. Download and install the new version of oc . If you are upgrading a cluster in a disconnected environment, install the oc version that you plan to upgrade to. Installing the OpenShift CLI on Linux You can install the OpenShift CLI ( oc ) binary on Linux by using the following procedure. Procedure Navigate to the OpenShift Container Platform downloads page on the Red Hat Customer Portal. Select the architecture from the Product Variant drop-down list. Select the appropriate version from the Version drop-down list. Click Download Now to the OpenShift v4.13 Linux Client entry and save the file. Unpack the archive: USD tar xvf <file> Place the oc binary in a directory that is on your PATH . To check your PATH , execute the following command: USD echo USDPATH Verification After you install the OpenShift CLI, it is available using the oc command: USD oc <command> Installing the OpenShift CLI on Windows You can install the OpenShift CLI ( oc ) binary on Windows by using the following procedure. 
Procedure Navigate to the OpenShift Container Platform downloads page on the Red Hat Customer Portal. Select the appropriate version from the Version drop-down list. Click Download Now to the OpenShift v4.13 Windows Client entry and save the file. Unzip the archive with a ZIP program. Move the oc binary to a directory that is on your PATH . To check your PATH , open the command prompt and execute the following command: C:\> path Verification After you install the OpenShift CLI, it is available using the oc command: C:\> oc <command> Installing the OpenShift CLI on macOS You can install the OpenShift CLI ( oc ) binary on macOS by using the following procedure. Procedure Navigate to the OpenShift Container Platform downloads page on the Red Hat Customer Portal. Select the appropriate version from the Version drop-down list. Click Download Now to the OpenShift v4.13 macOS Client entry and save the file. Note For macOS arm64, choose the OpenShift v4.13 macOS arm64 Client entry. Unpack and unzip the archive. Move the oc binary to a directory on your PATH. To check your PATH , open a terminal and execute the following command: USD echo USDPATH Verification After you install the OpenShift CLI, it is available using the oc command: USD oc <command> Additional resources Installing and using CLI plugins 14.2.2.2.2. Configuring credentials that allow images to be mirrored Create a container image registry credentials file that allows mirroring images from Red Hat to your mirror. Warning Do not use this image registry credentials file as the pull secret when you install a cluster. If you provide this file when you install cluster, all of the machines in the cluster will have write access to your mirror registry. Warning This process requires that you have write access to a container image registry on the mirror registry and adds the credentials to a registry pull secret. Prerequisites You configured a mirror registry to use in your disconnected environment. You identified an image repository location on your mirror registry to mirror images into. You provisioned a mirror registry account that allows images to be uploaded to that image repository. Procedure Complete the following steps on the installation host: Download your registry.redhat.io pull secret from the Red Hat OpenShift Cluster Manager . Make a copy of your pull secret in JSON format: USD cat ./pull-secret | jq . > <path>/<pull_secret_file_in_json> 1 1 Specify the path to the folder to store the pull secret in and a name for the JSON file that you create. The contents of the file resemble the following example: { "auths": { "cloud.openshift.com": { "auth": "b3BlbnNo...", "email": "[email protected]" }, "quay.io": { "auth": "b3BlbnNo...", "email": "[email protected]" }, "registry.connect.redhat.com": { "auth": "NTE3Njg5Nj...", "email": "[email protected]" }, "registry.redhat.io": { "auth": "NTE3Njg5Nj...", "email": "[email protected]" } } } Optional: If using the oc-mirror plugin, save the file as either ~/.docker/config.json or USDXDG_RUNTIME_DIR/containers/auth.json : If the .docker or USDXDG_RUNTIME_DIR/containers directories do not exist, create one by entering the following command: USD mkdir -p <directory_name> Where <directory_name> is either ~/.docker or USDXDG_RUNTIME_DIR/containers . 
Copy the pull secret to the appropriate directory by entering the following command: USD cp <path>/<pull_secret_file_in_json> <directory_name>/<auth_file> Where <directory_name> is either ~/.docker or USDXDG_RUNTIME_DIR/containers , and <auth_file> is either config.json or auth.json . Generate the base64-encoded user name and password or token for your mirror registry: USD echo -n '<user_name>:<password>' | base64 -w0 1 BGVtbYk3ZHAtqXs= 1 For <user_name> and <password> , specify the user name and password that you configured for your registry. Edit the JSON file and add a section that describes your registry to it: "auths": { "<mirror_registry>": { 1 "auth": "<credentials>", 2 "email": "[email protected]" } }, 1 For <mirror_registry> , specify the registry domain name, and optionally the port, that your mirror registry uses to serve content. For example, registry.example.com or registry.example.com:8443 2 For <credentials> , specify the base64-encoded user name and password for the mirror registry. The file resembles the following example: { "auths": { "registry.example.com": { "auth": "BGVtbYk3ZHAtqXs=", "email": "[email protected]" }, "cloud.openshift.com": { "auth": "b3BlbnNo...", "email": "[email protected]" }, "quay.io": { "auth": "b3BlbnNo...", "email": "[email protected]" }, "registry.connect.redhat.com": { "auth": "NTE3Njg5Nj...", "email": "[email protected]" }, "registry.redhat.io": { "auth": "NTE3Njg5Nj...", "email": "[email protected]" } } } 14.2.2.3. Mirroring images to a mirror registry Important To avoid excessive memory usage by the OpenShift Update Service application, you must mirror release images to a separate repository as described in the following procedure. Prerequisites You configured a mirror registry to use in your disconnected environment and can access the certificate and credentials that you configured. You downloaded the pull secret from the Red Hat OpenShift Cluster Manager and modified it to include authentication to your mirror repository. If you use self-signed certificates, you have specified a Subject Alternative Name in the certificates. Procedure Use the Red Hat OpenShift Container Platform Upgrade Graph visualizer and update planner to plan an update from one version to another. The OpenShift Upgrade Graph provides channel graphs and a way to confirm that there is an update path between your current and intended cluster versions. Set the required environment variables: Export the release version: USD export OCP_RELEASE=<release_version> For <release_version> , specify the tag that corresponds to the version of OpenShift Container Platform to which you want to update, such as 4.5.4 . Export the local registry name and host port: USD LOCAL_REGISTRY='<local_registry_host_name>:<local_registry_host_port>' For <local_registry_host_name> , specify the registry domain name for your mirror repository, and for <local_registry_host_port> , specify the port that it serves content on. Export the local repository name: USD LOCAL_REPOSITORY='<local_repository_name>' For <local_repository_name> , specify the name of the repository to create in your registry, such as ocp4/openshift4 . If you are using the OpenShift Update Service, export an additional local repository name to contain the release images: USD LOCAL_RELEASE_IMAGES_REPOSITORY='<local_release_images_repository_name>' For <local_release_images_repository_name> , specify the name of the repository to create in your registry, such as ocp4/openshift4-release-images . 
Export the name of the repository to mirror: USD PRODUCT_REPO='openshift-release-dev' For a production release, you must specify openshift-release-dev . Export the path to your registry pull secret: USD LOCAL_SECRET_JSON='<path_to_pull_secret>' For <path_to_pull_secret> , specify the absolute path to and file name of the pull secret for your mirror registry that you created. Note If your cluster uses an ImageContentSourcePolicy object to configure repository mirroring, you can use only global pull secrets for mirrored registries. You cannot add a pull secret to a project. Export the release mirror: USD RELEASE_NAME="ocp-release" For a production release, you must specify ocp-release . Export the type of architecture for your cluster: USD ARCHITECTURE=<cluster_architecture> 1 1 Specify the architecture of the cluster, such as x86_64 , aarch64 , s390x , or ppc64le . Export the path to the directory to host the mirrored images: USD REMOVABLE_MEDIA_PATH=<path> 1 1 Specify the full path, including the initial forward slash (/) character. Review the images and configuration manifests to mirror: USD oc adm release mirror -a USD{LOCAL_SECRET_JSON} --to-dir=USD{REMOVABLE_MEDIA_PATH}/mirror quay.io/USD{PRODUCT_REPO}/USD{RELEASE_NAME}:USD{OCP_RELEASE}-USD{ARCHITECTURE} --dry-run Mirror the version images to the mirror registry. If your mirror host does not have internet access, take the following actions: Connect the removable media to a system that is connected to the internet. Mirror the images and configuration manifests to a directory on the removable media: USD oc adm release mirror -a USD{LOCAL_SECRET_JSON} --to-dir=USD{REMOVABLE_MEDIA_PATH}/mirror quay.io/USD{PRODUCT_REPO}/USD{RELEASE_NAME}:USD{OCP_RELEASE}-USD{ARCHITECTURE} Note This command also generates and saves the mirrored release image signature config map onto the removable media. Take the media to the disconnected environment and upload the images to the local container registry. USD oc image mirror -a USD{LOCAL_SECRET_JSON} --from-dir=USD{REMOVABLE_MEDIA_PATH}/mirror "file://openshift/release:USD{OCP_RELEASE}*" USD{LOCAL_REGISTRY}/USD{LOCAL_REPOSITORY} 1 1 For REMOVABLE_MEDIA_PATH , you must use the same path that you specified when you mirrored the images. Use oc command-line interface (CLI) to log in to the cluster that you are upgrading. Apply the mirrored release image signature config map to the connected cluster: USD oc apply -f USD{REMOVABLE_MEDIA_PATH}/mirror/config/<image_signature_file> 1 1 For <image_signature_file> , specify the path and name of the file, for example, signature-sha256-81154f5c03294534.yaml . If you are using the OpenShift Update Service, mirror the release image to a separate repository: USD oc image mirror -a USD{LOCAL_SECRET_JSON} USD{LOCAL_REGISTRY}/USD{LOCAL_REPOSITORY}:USD{OCP_RELEASE}-USD{ARCHITECTURE} USD{LOCAL_REGISTRY}/USD{LOCAL_RELEASE_IMAGES_REPOSITORY}:USD{OCP_RELEASE}-USD{ARCHITECTURE} If the local container registry and the cluster are connected to the mirror host, take the following actions: Directly push the release images to the local registry and apply the config map to the cluster by using following command: USD oc adm release mirror -a USD{LOCAL_SECRET_JSON} --from=quay.io/USD{PRODUCT_REPO}/USD{RELEASE_NAME}:USD{OCP_RELEASE}-USD{ARCHITECTURE} \ --to=USD{LOCAL_REGISTRY}/USD{LOCAL_REPOSITORY} --apply-release-image-signature Note If you include the --apply-release-image-signature option, do not create the config map for image signature verification. 
If you are using the OpenShift Update Service, mirror the release image to a separate repository: USD oc image mirror -a USD{LOCAL_SECRET_JSON} USD{LOCAL_REGISTRY}/USD{LOCAL_REPOSITORY}:USD{OCP_RELEASE}-USD{ARCHITECTURE} USD{LOCAL_REGISTRY}/USD{LOCAL_RELEASE_IMAGES_REPOSITORY}:USD{OCP_RELEASE}-USD{ARCHITECTURE} 14.3. Updating a cluster in a disconnected environment using the OpenShift Update Service To get an update experience similar to connected clusters, you can use the following procedures to install and configure the OpenShift Update Service (OSUS) in a disconnected environment. The following steps outline the high-level workflow on how to update a cluster in a disconnected environment using OSUS: Configure access to a secured registry. Update the global cluster pull secret to access your mirror registry. Install the OSUS Operator. Create a graph data container image for the OpenShift Update Service. Install the OSUS application and configure your clusters to use the local OpenShift Update Service. Perform a supported update procedure from the documentation as you would with a connected cluster. 14.3.1. Using the OpenShift Update Service in a disconnected environment The OpenShift Update Service (OSUS) provides update recommendations to OpenShift Container Platform clusters. Red Hat publicly hosts the OpenShift Update Service, and clusters in a connected environment can connect to the service through public APIs to retrieve update recommendations. However, clusters in a disconnected environment cannot access these public APIs to retrieve update information. To have a similar update experience in a disconnected environment, you can install and configure the OpenShift Update Service locally so that it is available within the disconnected environment. A single OSUS instance is capable of serving recommendations to thousands of clusters. OSUS can be scaled horizontally to cater to more clusters by changing the replica value. So for most disconnected use cases, one OSUS instance is enough. For example, Red Hat hosts just one OSUS instance for the entire fleet of connected clusters. If you want to keep update recommendations separate in different environments, you can run one OSUS instance for each environment. For example, in a case where you have separate test and stage environments, you might not want a cluster in a stage environment to receive update recommendations to version A if that version has not been tested in the test environment yet. The following sections describe how to install a local OSUS instance and configure it to provide update recommendations to a cluster. Additional resources About the OpenShift Update Service Understanding update channels and releases 14.3.2. Prerequisites You must have the oc command-line interface (CLI) tool installed. You must provision a local container image registry with the container images for your update, as described in Mirroring the OpenShift Container Platform image repository . 14.3.3. Configuring access to a secured registry for the OpenShift Update Service If the release images are contained in a registry whose HTTPS X.509 certificate is signed by a custom certificate authority, complete the steps in Configuring additional trust stores for image registry access along with following changes for the update service. The OpenShift Update Service Operator needs the config map key name updateservice-registry in the registry CA cert. 
Image registry CA config map example for the update service apiVersion: v1 kind: ConfigMap metadata: name: my-registry-ca data: updateservice-registry: | 1 -----BEGIN CERTIFICATE----- ... -----END CERTIFICATE----- registry-with-port.example.com..5000: | 2 -----BEGIN CERTIFICATE----- ... -----END CERTIFICATE----- 1 The OpenShift Update Service Operator requires the config map key name updateservice-registry in the registry CA cert. 2 If the registry has the port, such as registry-with-port.example.com:5000 , : should be replaced with .. . 14.3.4. Updating the global cluster pull secret You can update the global pull secret for your cluster by either replacing the current pull secret or appending a new pull secret. The procedure is required when users use a separate registry to store images than the registry used during installation. Prerequisites You have access to the cluster as a user with the cluster-admin role. Procedure Optional: To append a new pull secret to the existing pull secret, complete the following steps: Enter the following command to download the pull secret: USD oc get secret/pull-secret -n openshift-config --template='{{index .data ".dockerconfigjson" | base64decode}}' ><pull_secret_location> 1 1 Provide the path to the pull secret file. Enter the following command to add the new pull secret: USD oc registry login --registry="<registry>" \ 1 --auth-basic="<username>:<password>" \ 2 --to=<pull_secret_location> 3 1 Provide the new registry. You can include multiple repositories within the same registry, for example: --registry="<registry/my-namespace/my-repository>" . 2 Provide the credentials of the new registry. 3 Provide the path to the pull secret file. Alternatively, you can perform a manual update to the pull secret file. Enter the following command to update the global pull secret for your cluster: USD oc set data secret/pull-secret -n openshift-config --from-file=.dockerconfigjson=<pull_secret_location> 1 1 Provide the path to the new pull secret file. This update is rolled out to all nodes, which can take some time depending on the size of your cluster. Note As of OpenShift Container Platform 4.7.4, changes to the global pull secret no longer trigger a node drain or reboot. 14.3.5. Installing the OpenShift Update Service Operator To install the OpenShift Update Service, you must first install the OpenShift Update Service Operator by using the OpenShift Container Platform web console or CLI. Note For clusters that are installed in disconnected environments, also known as disconnected clusters, Operator Lifecycle Manager by default cannot access the Red Hat-provided OperatorHub sources hosted on remote registries because those remote sources require full internet connectivity. For more information, see Using Operator Lifecycle Manager on restricted networks . 14.3.5.1. Installing the OpenShift Update Service Operator by using the web console You can use the web console to install the OpenShift Update Service Operator. Procedure In the web console, click Operators OperatorHub . Note Enter Update Service into the Filter by keyword... field to find the Operator faster. Choose OpenShift Update Service from the list of available Operators, and click Install . Channel v1 is selected as the Update Channel since it is the only channel available in this release. Select A specific namespace on the cluster under Installation Mode . Select a namespace for Installed Namespace or accept the recommended namespace openshift-update-service . 
Select an Approval Strategy : The Automatic strategy allows Operator Lifecycle Manager (OLM) to automatically update the Operator when a new version is available. The Manual strategy requires a cluster administrator to approve the Operator update. Click Install . Verify that the OpenShift Update Service Operator is installed by switching to the Operators Installed Operators page. Ensure that OpenShift Update Service is listed in the selected namespace with a Status of Succeeded . 14.3.5.2. Installing the OpenShift Update Service Operator by using the CLI You can use the OpenShift CLI ( oc ) to install the OpenShift Update Service Operator. Procedure Create a namespace for the OpenShift Update Service Operator: Create a Namespace object YAML file, for example, update-service-namespace.yaml , for the OpenShift Update Service Operator: apiVersion: v1 kind: Namespace metadata: name: openshift-update-service annotations: openshift.io/node-selector: "" labels: openshift.io/cluster-monitoring: "true" 1 1 Set the openshift.io/cluster-monitoring label to enable Operator-recommended cluster monitoring on this namespace. Create the namespace: USD oc create -f <filename>.yaml For example: USD oc create -f update-service-namespace.yaml Install the OpenShift Update Service Operator by creating the following objects: Create an OperatorGroup object YAML file, for example, update-service-operator-group.yaml : apiVersion: operators.coreos.com/v1 kind: OperatorGroup metadata: name: update-service-operator-group spec: targetNamespaces: - openshift-update-service Create an OperatorGroup object: USD oc -n openshift-update-service create -f <filename>.yaml For example: USD oc -n openshift-update-service create -f update-service-operator-group.yaml Create a Subscription object YAML file, for example, update-service-subscription.yaml : Example Subscription apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: name: update-service-subscription spec: channel: v1 installPlanApproval: "Automatic" source: "redhat-operators" 1 sourceNamespace: "openshift-marketplace" name: "cincinnati-operator" 1 Specify the name of the catalog source that provides the Operator. For clusters that do not use a custom Operator Lifecycle Manager (OLM), specify redhat-operators . If your OpenShift Container Platform cluster is installed in a disconnected environment, specify the name of the CatalogSource object created when you configured Operator Lifecycle Manager (OLM). Create the Subscription object: USD oc create -f <filename>.yaml For example: USD oc -n openshift-update-service create -f update-service-subscription.yaml The OpenShift Update Service Operator is installed to the openshift-update-service namespace and targets the openshift-update-service namespace. Verify the Operator installation: USD oc -n openshift-update-service get clusterserviceversions Example output NAME DISPLAY VERSION REPLACES PHASE update-service-operator.v4.6.0 OpenShift Update Service 4.6.0 Succeeded ... If the OpenShift Update Service Operator is listed, the installation was successful. The version number might be different than shown. Additional resources Installing Operators in your namespace . 14.3.6. Creating the OpenShift Update Service graph data container image The OpenShift Update Service requires a graph data container image, from which the OpenShift Update Service retrieves information about channel membership and blocked update edges. Graph data is typically fetched directly from the upgrade graph data repository. 
In environments where an internet connection is unavailable, loading this information from an init container is another way to make the graph data available to the OpenShift Update Service. The role of the init container is to provide a local copy of the graph data, and during pod initialization, the init container copies the data to a volume that is accessible by the service. Note The oc-mirror OpenShift CLI ( oc ) plugin creates this graph data container image in addition to mirroring release images. If you used the oc-mirror plugin to mirror your release images, you can skip this procedure. Procedure Create a Dockerfile, for example, ./Dockerfile , containing the following: FROM registry.access.redhat.com/ubi9/ubi:latest RUN curl -L -o cincinnati-graph-data.tar.gz https://api.openshift.com/api/upgrades_info/graph-data RUN mkdir -p /var/lib/cincinnati-graph-data && tar xvzf cincinnati-graph-data.tar.gz -C /var/lib/cincinnati-graph-data/ --no-overwrite-dir --no-same-owner CMD ["/bin/bash", "-c" ,"exec cp -rp /var/lib/cincinnati-graph-data/* /var/lib/cincinnati/graph-data"] Use the docker file created in the above step to build a graph data container image, for example, registry.example.com/openshift/graph-data:latest : USD podman build -f ./Dockerfile -t registry.example.com/openshift/graph-data:latest Push the graph data container image created in the step to a repository that is accessible to the OpenShift Update Service, for example, registry.example.com/openshift/graph-data:latest : USD podman push registry.example.com/openshift/graph-data:latest Note To push a graph data image to a local registry in a disconnected environment, copy the graph data container image created in the step to a repository that is accessible to the OpenShift Update Service. Run oc image mirror --help for available options. 14.3.7. Creating an OpenShift Update Service application You can create an OpenShift Update Service application by using the OpenShift Container Platform web console or CLI. 14.3.7.1. Creating an OpenShift Update Service application by using the web console You can use the OpenShift Container Platform web console to create an OpenShift Update Service application by using the OpenShift Update Service Operator. Prerequisites The OpenShift Update Service Operator has been installed. The OpenShift Update Service graph data container image has been created and pushed to a repository that is accessible to the OpenShift Update Service. The current release and update target releases have been mirrored to a locally accessible registry. Procedure In the web console, click Operators Installed Operators . Choose OpenShift Update Service from the list of installed Operators. Click the Update Service tab. Click Create UpdateService . Enter a name in the Name field, for example, service . Enter the local pullspec in the Graph Data Image field to the graph data container image created in "Creating the OpenShift Update Service graph data container image", for example, registry.example.com/openshift/graph-data:latest . In the Releases field, enter the local registry and repository created to contain the release images in "Mirroring the OpenShift Container Platform image repository", for example, registry.example.com/ocp4/openshift4-release-images . Enter 2 in the Replicas field. Click Create to create the OpenShift Update Service application. Verify the OpenShift Update Service application: From the UpdateServices list in the Update Service tab, click the Update Service application just created. 
Click the Resources tab. Verify each application resource has a status of Created . 14.3.7.2. Creating an OpenShift Update Service application by using the CLI You can use the OpenShift CLI ( oc ) to create an OpenShift Update Service application. Prerequisites The OpenShift Update Service Operator has been installed. The OpenShift Update Service graph data container image has been created and pushed to a repository that is accessible to the OpenShift Update Service. The current release and update target releases have been mirrored to a locally accessible registry. Procedure Configure the OpenShift Update Service target namespace, for example, openshift-update-service : USD NAMESPACE=openshift-update-service The namespace must match the targetNamespaces value from the operator group. Configure the name of the OpenShift Update Service application, for example, service : USD NAME=service Configure the local registry and repository for the release images as configured in "Mirroring the OpenShift Container Platform image repository", for example, registry.example.com/ocp4/openshift4-release-images : USD RELEASE_IMAGES=registry.example.com/ocp4/openshift4-release-images Set the local pullspec for the graph data image to the graph data container image created in "Creating the OpenShift Update Service graph data container image", for example, registry.example.com/openshift/graph-data:latest : USD GRAPH_DATA_IMAGE=registry.example.com/openshift/graph-data:latest Create an OpenShift Update Service application object: USD oc -n "USD{NAMESPACE}" create -f - <<EOF apiVersion: updateservice.operator.openshift.io/v1 kind: UpdateService metadata: name: USD{NAME} spec: replicas: 2 releases: USD{RELEASE_IMAGES} graphDataImage: USD{GRAPH_DATA_IMAGE} EOF Verify the OpenShift Update Service application: Use the following command to obtain a policy engine route: USD while sleep 1; do POLICY_ENGINE_GRAPH_URI="USD(oc -n "USD{NAMESPACE}" get -o jsonpath='{.status.policyEngineURI}/api/upgrades_info/v1/graph{"\n"}' updateservice "USD{NAME}")"; SCHEME="USD{POLICY_ENGINE_GRAPH_URI%%:*}"; if test "USD{SCHEME}" = http -o "USD{SCHEME}" = https; then break; fi; done You might need to poll until the command succeeds. Retrieve a graph from the policy engine. Be sure to specify a valid version for channel . For example, if running in OpenShift Container Platform 4.13, use stable-4.13 : USD while sleep 10; do HTTP_CODE="USD(curl --header Accept:application/json --output /dev/stderr --write-out "%{http_code}" "USD{POLICY_ENGINE_GRAPH_URI}?channel=stable-4.6")"; if test "USD{HTTP_CODE}" -eq 200; then break; fi; echo "USD{HTTP_CODE}"; done This polls until the graph request succeeds; however, the resulting graph might be empty depending on which release images you have mirrored. Note The policy engine route name must not be more than 63 characters based on RFC-1123. If you see ReconcileCompleted status as false with the reason CreateRouteFailed caused by host must conform to DNS 1123 naming convention and must be no more than 63 characters , try creating the Update Service with a shorter name. 14.3.7.2.1. Configuring the Cluster Version Operator (CVO) After the OpenShift Update Service Operator has been installed and the OpenShift Update Service application has been created, the Cluster Version Operator (CVO) can be updated to pull graph data from the locally installed OpenShift Update Service. Prerequisites The OpenShift Update Service Operator has been installed. 
The OpenShift Update Service graph data container image has been created and pushed to a repository that is accessible to the OpenShift Update Service. The current release and update target releases have been mirrored to a locally accessible registry. The OpenShift Update Service application has been created. Procedure Set the OpenShift Update Service target namespace, for example, openshift-update-service : USD NAMESPACE=openshift-update-service Set the name of the OpenShift Update Service application, for example, service : USD NAME=service Obtain the policy engine route: USD POLICY_ENGINE_GRAPH_URI="USD(oc -n "USD{NAMESPACE}" get -o jsonpath='{.status.policyEngineURI}/api/upgrades_info/v1/graph{"\n"}' updateservice "USD{NAME}")" Set the patch for the pull graph data: USD PATCH="{\"spec\":{\"upstream\":\"USD{POLICY_ENGINE_GRAPH_URI}\"}}" Patch the CVO to use the local OpenShift Update Service: USD oc patch clusterversion version -p USDPATCH --type merge Note See Enabling the cluster-wide proxy to configure the CA to trust the update server. 14.3.8. steps Before updating your cluster, confirm that the following conditions are met: The Cluster Version Operator (CVO) is configured to use your locally-installed OpenShift Update Service application. The release image signature config map for the new release is applied to your cluster. Note The release image signature config map allows the Cluster Version Operator (CVO) to ensure the integrity of release images by verifying that the actual image signatures match the expected signatures. The current release and update target release images are mirrored to a locally accessible registry. A recent graph data container image has been mirrored to your local registry. A recent version of the OpenShift Update Service Operator is installed. Note If you have not recently installed or updated the OpenShift Update Service Operator, there might be a more recent version available. See Using Operator Lifecycle Manager on restricted networks for more information about how to update your OLM catalog in a disconnected environment. After you configure your cluster to use the locally-installed OpenShift Update Service and local mirror registry, you can use any of the following update methods: Updating a cluster using the web console Updating a cluster using the CLI Preparing to perform an EUS-to-EUS update Performing a canary rollout update Updating a cluster that includes RHEL compute machines 14.4. Updating a cluster in a disconnected environment without the OpenShift Update Service Use the following procedures to update a cluster in a disconnected environment without access to the OpenShift Update Service. 14.4.1. Prerequisites You must have the oc command-line interface (CLI) tool installed. You must provision a local container image registry with the container images for your update, as described in Mirroring the OpenShift Container Platform image repository . You must have access to the cluster as a user with admin privileges. See Using RBAC to define and apply permissions . You must have a recent etcd backup in case your update fails and you must restore your cluster to a state . You must ensure that all machine config pools (MCPs) are running and not paused. Nodes associated with a paused MCP are skipped during the update process. You can pause the MCPs if you are performing a canary rollout update strategy. If your cluster uses manually maintained credentials, update the cloud provider resources for the new release. 
For more information, including how to determine if this is a requirement for your cluster, see Preparing to update a cluster with manually maintained credentials . If you run an Operator or you have configured any application with the pod disruption budget, you might experience an interruption during the upgrade process. If minAvailable is set to 1 in PodDisruptionBudget , the nodes are drained to apply pending machine configs which might block the eviction process. If several nodes are rebooted, all the pods might run on only one node, and the PodDisruptionBudget field can prevent the node drain. 14.4.2. Pausing a MachineHealthCheck resource During the upgrade process, nodes in the cluster might become temporarily unavailable. In the case of worker nodes, the machine health check might identify such nodes as unhealthy and reboot them. To avoid rebooting such nodes, pause all the MachineHealthCheck resources before updating the cluster. Prerequisites Install the OpenShift CLI ( oc ). Procedure To list all the available MachineHealthCheck resources that you want to pause, run the following command: USD oc get machinehealthcheck -n openshift-machine-api To pause the machine health checks, add the cluster.x-k8s.io/paused="" annotation to the MachineHealthCheck resource. Run the following command: USD oc -n openshift-machine-api annotate mhc <mhc-name> cluster.x-k8s.io/paused="" The annotated MachineHealthCheck resource resembles the following YAML file: apiVersion: machine.openshift.io/v1beta1 kind: MachineHealthCheck metadata: name: example namespace: openshift-machine-api annotations: cluster.x-k8s.io/paused: "" spec: selector: matchLabels: role: worker unhealthyConditions: - type: "Ready" status: "Unknown" timeout: "300s" - type: "Ready" status: "False" timeout: "300s" maxUnhealthy: "40%" status: currentHealthy: 5 expectedMachines: 5 Important Resume the machine health checks after updating the cluster. To resume the check, remove the pause annotation from the MachineHealthCheck resource by running the following command: USD oc -n openshift-machine-api annotate mhc <mhc-name> cluster.x-k8s.io/paused- 14.4.3. Retrieving a release image digest In order to update a cluster in a disconnected environment using the oc adm upgrade command with the --to-image option, you must reference the sha256 digest that corresponds to your targeted release image. Procedure Run the following command on a device that is connected to the internet: USD oc adm release info -o 'jsonpath={.digest}{"\n"}' quay.io/openshift-release-dev/ocp-release:USD{OCP_RELEASE_VERSION}-USD{ARCHITECTURE} For {OCP_RELEASE_VERSION} , specify the version of OpenShift Container Platform to which you want to update, such as 4.10.16 . For {ARCHITECTURE} , specify the architecture of the cluster, such as x86_64 , aarch64 , s390x , or ppc64le . Example output sha256:a8bfba3b6dddd1a2fbbead7dac65fe4fb8335089e4e7cae327f3bad334add31d Copy the sha256 digest for use when updating your cluster. 14.4.4.
Updating the disconnected cluster Update the disconnected cluster to the OpenShift Container Platform version that you downloaded the release images for. Note If you have a local OpenShift Update Service, you can update by using the connected web console or CLI instructions instead of this procedure. Prerequisites You mirrored the images for the new release to your registry. You applied the release image signature ConfigMap for the new release to your cluster. Note The release image signature config map allows the Cluster Version Operator (CVO) to ensure the integrity of release images by verifying that the actual image signatures match the expected signatures. You obtained the sha256 digest for your targeted release image. You installed the OpenShift CLI ( oc ). You paused all MachineHealthCheck resources. Procedure Update the cluster: USD oc adm upgrade --allow-explicit-upgrade --to-image <defined_registry>/<defined_repository>@<digest> Where: <defined_registry> Specifies the name of the mirror registry you mirrored your images to. <defined_repository> Specifies the name of the image repository you want to use on the mirror registry. <digest> Specifies the sha256 digest for the targeted release image, for example, sha256:81154f5c03294534e1eaf0319bef7a601134f891689ccede5d705ef659aa8c92 . Note See "Mirroring OpenShift Container Platform images" to review how your mirror registry and repository names are defined. If you used an ImageContentSourcePolicy or ImageDigestMirrorSet , you can use the canonical registry and repository names instead of the names you defined. The canonical registry name is quay.io and the canonical repository name is openshift-release-dev/ocp-release . You can only configure global pull secrets for clusters that have an ImageContentSourcePolicy , ImageDigestMirrorSet , or ImageTagMirrorSet object. You cannot add a pull secret to a project. Additional resources Mirroring OpenShift Container Platform images 14.4.5. Understanding image registry repository mirroring Setting up container registry repository mirroring enables you to perform the following tasks: Configure your OpenShift Container Platform cluster to redirect requests to pull images from a repository on a source image registry and have it resolved by a repository on a mirrored image registry. Identify multiple mirrored repositories for each target repository, to make sure that if one mirror is down, another can be used. Repository mirroring in OpenShift Container Platform includes the following attributes: Image pulls are resilient to registry downtimes. Clusters in disconnected environments can pull images from critical locations, such as quay.io, and have registries behind a company firewall provide the requested images. A particular order of registries is tried when an image pull request is made, with the permanent registry typically being the last one tried. The mirror information you enter is added to the /etc/containers/registries.conf file on every node in the OpenShift Container Platform cluster. When a node makes a request for an image from the source repository, it tries each mirrored repository in turn until it finds the requested content. If all mirrors fail, the cluster tries the source repository. If successful, the image is pulled to the node. 
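For illustration only, a mirror entry generated in /etc/containers/registries.conf might look similar to the following sketch. The source and mirror locations shown here are placeholders based on the example registry names used elsewhere in this chapter, not output captured from a cluster:
[[registry]]
  prefix = ""
  location = "quay.io/openshift-release-dev/ocp-release"
  mirror-by-digest-only = true

  [[registry.mirror]]
    location = "local.registry:5000/ocp4/openshift4-release-images"
In this sketch, pulls for the source location are attempted against the listed mirror first, and the source registry is contacted only if the mirror cannot serve the requested content.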
Setting up repository mirroring can be done in the following ways: At OpenShift Container Platform installation: By pulling container images needed by OpenShift Container Platform and then bringing those images behind your company's firewall, you can install OpenShift Container Platform into a datacenter that is in a disconnected environment. After OpenShift Container Platform installation: If you did not configure mirroring during OpenShift Container Platform installation, you can do so postinstallation by using any of the following custom resource (CR) objects: ImageDigestMirrorSet (IDMS). This object allows you to pull images from a mirrored registry by using digest specifications. The IDMS CR enables you to set a fall back policy that allows or stops continued attempts to pull from the source registry if the image pull fails. ImageTagMirrorSet (ITMS). This object allows you to pull images from a mirrored registry by using image tags. The ITMS CR enables you to set a fall back policy that allows or stops continued attempts to pull from the source registry if the image pull fails. ImageContentSourcePolicy (ICSP). This object allows you to pull images from a mirrored registry by using digest specifications. The ICSP CR always falls back to the source registry if the mirrors do not work. Important Using an ImageContentSourcePolicy (ICSP) object to configure repository mirroring is a deprecated feature. Deprecated functionality is still included in OpenShift Container Platform and continues to be supported; however, it will be removed in a future release of this product and is not recommended for new deployments. If you have existing YAML files that you used to create ImageContentSourcePolicy objects, you can use the oc adm migrate icsp command to convert those files to an ImageDigestMirrorSet YAML file. For more information, see "Converting ImageContentSourcePolicy (ICSP) files for image registry repository mirroring" in the following section. Each of these custom resource objects identify the following information: The source of the container image repository you want to mirror. A separate entry for each mirror repository you want to offer the content requested from the source repository. For new clusters, you can use IDMS, ITMS, and ICSP CRs objects as desired. However, using IDMS and ITMS is recommended. If you upgraded a cluster, any existing ICSP objects remain stable, and both IDMS and ICSP objects are supported. Workloads using ICSP objects continue to function as expected. However, if you want to take advantage of the fallback policies introduced in the IDMS CRs, you can migrate current workloads to IDMS objects by using the oc adm migrate icsp command as shown in the Converting ImageContentSourcePolicy (ICSP) files for image registry repository mirroring section that follows. Migrating to IDMS objects does not require a cluster reboot. Note If your cluster uses an ImageDigestMirrorSet , ImageTagMirrorSet , or ImageContentSourcePolicy object to configure repository mirroring, you can use only global pull secrets for mirrored registries. You cannot add a pull secret to a project. 14.4.5.1. Converting ImageContentSourcePolicy (ICSP) files for image registry repository mirroring Using an ImageContentSourcePolicy (ICSP) object to configure repository mirroring is a deprecated feature. 
This functionality is still included in OpenShift Container Platform and continues to be supported; however, it will be removed in a future release of this product and is not recommended for new deployments. ICSP objects are being replaced by ImageDigestMirrorSet and ImageTagMirrorSet objects to configure repository mirroring. If you have existing YAML files that you used to create ImageContentSourcePolicy objects, you can use the oc adm migrate icsp command to convert those files to an ImageDigestMirrorSet YAML file. The command updates the API to the current version, changes the kind value to ImageDigestMirrorSet , and changes spec.repositoryDigestMirrors to spec.imageDigestMirrors . The rest of the file is not changed. Because the migration does not change the registries.conf file, the cluster does not need to reboot. For more information about ImageDigestMirrorSet or ImageTagMirrorSet objects, see "Configuring image registry repository mirroring" in the section. Prerequisites Access to the cluster as a user with the cluster-admin role. Ensure that you have ImageContentSourcePolicy objects on your cluster. Procedure Use the following command to convert one or more ImageContentSourcePolicy YAML files to an ImageDigestMirrorSet YAML file: USD oc adm migrate icsp <file_name>.yaml <file_name>.yaml <file_name>.yaml --dest-dir <path_to_the_directory> where: <file_name> Specifies the name of the source ImageContentSourcePolicy YAML. You can list multiple file names. --dest-dir Optional: Specifies a directory for the output ImageDigestMirrorSet YAML. If unset, the file is written to the current directory. For example, the following command converts the icsp.yaml and icsp-2.yaml file and saves the new YAML files to the idms-files directory. USD oc adm migrate icsp icsp.yaml icsp-2.yaml --dest-dir idms-files Example output wrote ImageDigestMirrorSet to idms-files/imagedigestmirrorset_ubi8repo.5911620242173376087.yaml wrote ImageDigestMirrorSet to idms-files/imagedigestmirrorset_ubi9repo.6456931852378115011.yaml Create the CR object by running the following command: USD oc create -f <path_to_the_directory>/<file-name>.yaml where: <path_to_the_directory> Specifies the path to the directory, if you used the --dest-dir flag. <file_name> Specifies the name of the ImageDigestMirrorSet YAML. Remove the ICSP objects after the IDMS objects are rolled out. 14.4.6. Widening the scope of the mirror image catalog to reduce the frequency of cluster node reboots You can scope the mirrored image catalog at the repository level or the wider registry level. A widely scoped ImageContentSourcePolicy resource reduces the number of times the nodes need to reboot in response to changes to the resource. To widen the scope of the mirror image catalog in the ImageContentSourcePolicy resource, perform the following procedure. Prerequisites Install the OpenShift Container Platform CLI oc . Log in as a user with cluster-admin privileges. Configure a mirrored image catalog for use in your disconnected cluster. Procedure Run the following command, specifying values for <local_registry> , <pull_spec> , and <pull_secret_file> : USD oc adm catalog mirror <local_registry>/<pull_spec> <local_registry> -a <pull_secret_file> --icsp-scope=registry where: <local_registry> is the local registry you have configured for your disconnected cluster, for example, local.registry:5000 . 
<pull_spec> is the pull specification as configured in your disconnected registry, for example, redhat/redhat-operator-index:v4.13 <pull_secret_file> is the registry.redhat.io pull secret in .json file format. You can download the pull secret from the Red Hat OpenShift Cluster Manager . The oc adm catalog mirror command creates a /redhat-operator-index-manifests directory and generates imageContentSourcePolicy.yaml , catalogSource.yaml , and mapping.txt files. Apply the new ImageContentSourcePolicy resource to the cluster: USD oc apply -f imageContentSourcePolicy.yaml Verification Verify that oc apply successfully applied the change to ImageContentSourcePolicy : USD oc get ImageContentSourcePolicy -o yaml Example output apiVersion: v1 items: - apiVersion: operator.openshift.io/v1alpha1 kind: ImageContentSourcePolicy metadata: annotations: kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operator.openshift.io/v1alpha1","kind":"ImageContentSourcePolicy","metadata":{"annotations":{},"name":"redhat-operator-index"},"spec":{"repositoryDigestMirrors":[{"mirrors":["local.registry:5000"],"source":"registry.redhat.io"}]}} ... After you update the ImageContentSourcePolicy resource, OpenShift Container Platform deploys the new settings to each node and the cluster starts using the mirrored repository for requests to the source repository. 14.4.7. Additional resources Using Operator Lifecycle Manager on restricted networks Machine Config Overview 14.5. Uninstalling the OpenShift Update Service from a cluster To remove a local copy of the OpenShift Update Service (OSUS) from your cluster, you must first delete the OSUS application and then uninstall the OSUS Operator. 14.5.1. Deleting an OpenShift Update Service application You can delete an OpenShift Update Service application by using the OpenShift Container Platform web console or CLI. 14.5.1.1. Deleting an OpenShift Update Service application by using the web console You can use the OpenShift Container Platform web console to delete an OpenShift Update Service application by using the OpenShift Update Service Operator. Prerequisites The OpenShift Update Service Operator has been installed. Procedure In the web console, click Operators Installed Operators . Choose OpenShift Update Service from the list of installed Operators. Click the Update Service tab. From the list of installed OpenShift Update Service applications, select the application to be deleted and then click Delete UpdateService . From the Delete UpdateService? confirmation dialog, click Delete to confirm the deletion. 14.5.1.2. Deleting an OpenShift Update Service application by using the CLI You can use the OpenShift CLI ( oc ) to delete an OpenShift Update Service application. Procedure Get the OpenShift Update Service application name using the namespace the OpenShift Update Service application was created in, for example, openshift-update-service : USD oc get updateservice -n openshift-update-service Example output NAME AGE service 6s Delete the OpenShift Update Service application using the NAME value from the step and the namespace the OpenShift Update Service application was created in, for example, openshift-update-service : USD oc delete updateservice service -n openshift-update-service Example output updateservice.updateservice.operator.openshift.io "service" deleted 14.5.2. Uninstalling the OpenShift Update Service Operator You can uninstall the OpenShift Update Service Operator by using the OpenShift Container Platform web console or CLI. 14.5.2.1. 
Uninstalling the OpenShift Update Service Operator by using the web console You can use the OpenShift Container Platform web console to uninstall the OpenShift Update Service Operator. Prerequisites All OpenShift Update Service applications have been deleted. Procedure In the web console, click Operators Installed Operators . Select OpenShift Update Service from the list of installed Operators and click Uninstall Operator . From the Uninstall Operator? confirmation dialog, click Uninstall to confirm the uninstallation. 14.5.2.2. Uninstalling the OpenShift Update Service Operator by using the CLI You can use the OpenShift CLI ( oc ) to uninstall the OpenShift Update Service Operator. Prerequisites All OpenShift Update Service applications have been deleted. Procedure Change to the project containing the OpenShift Update Service Operator, for example, openshift-update-service : USD oc project openshift-update-service Example output Now using project "openshift-update-service" on server "https://example.com:6443". Get the name of the OpenShift Update Service Operator operator group: USD oc get operatorgroup Example output NAME AGE openshift-update-service-fprx2 4m41s Delete the operator group, for example, openshift-update-service-fprx2 : USD oc delete operatorgroup openshift-update-service-fprx2 Example output operatorgroup.operators.coreos.com "openshift-update-service-fprx2" deleted Get the name of the OpenShift Update Service Operator subscription: USD oc get subscription Example output NAME PACKAGE SOURCE CHANNEL update-service-operator update-service-operator updateservice-index-catalog v1 Using the Name value from the previous step, check the current version of the subscribed OpenShift Update Service Operator in the currentCSV field: USD oc get subscription update-service-operator -o yaml | grep " currentCSV" Example output currentCSV: update-service-operator.v0.0.1 Delete the subscription, for example, update-service-operator : USD oc delete subscription update-service-operator Example output subscription.operators.coreos.com "update-service-operator" deleted Delete the CSV for the OpenShift Update Service Operator using the currentCSV value from the previous step: USD oc delete clusterserviceversion update-service-operator.v0.0.1 Example output clusterserviceversion.operators.coreos.com "update-service-operator.v0.0.1" deleted
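As an optional check that is not part of the documented procedure, you can confirm that no Operator Lifecycle Manager resources for the OpenShift Update Service remain. The following command is a sketch that assumes the openshift-update-service namespace used in the examples above:
oc -n openshift-update-service get operatorgroup,subscription,clusterserviceversion
Once the uninstallation is complete, each of these resource types should report that no resources are found.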
[ "tar xvf <file>", "echo USDPATH", "oc <command>", "C:\\> path", "C:\\> oc <command>", "echo USDPATH", "oc <command>", "cat ./pull-secret | jq . > <path>/<pull_secret_file_in_json> 1", "{ \"auths\": { \"cloud.openshift.com\": { \"auth\": \"b3BlbnNo...\", \"email\": \"[email protected]\" }, \"quay.io\": { \"auth\": \"b3BlbnNo...\", \"email\": \"[email protected]\" }, \"registry.connect.redhat.com\": { \"auth\": \"NTE3Njg5Nj...\", \"email\": \"[email protected]\" }, \"registry.redhat.io\": { \"auth\": \"NTE3Njg5Nj...\", \"email\": \"[email protected]\" } } }", "mkdir -p <directory_name>", "cp <path>/<pull_secret_file_in_json> <directory_name>/<auth_file>", "echo -n '<user_name>:<password>' | base64 -w0 1 BGVtbYk3ZHAtqXs=", "\"auths\": { \"<mirror_registry>\": { 1 \"auth\": \"<credentials>\", 2 \"email\": \"[email protected]\" } },", "{ \"auths\": { \"registry.example.com\": { \"auth\": \"BGVtbYk3ZHAtqXs=\", \"email\": \"[email protected]\" }, \"cloud.openshift.com\": { \"auth\": \"b3BlbnNo...\", \"email\": \"[email protected]\" }, \"quay.io\": { \"auth\": \"b3BlbnNo...\", \"email\": \"[email protected]\" }, \"registry.connect.redhat.com\": { \"auth\": \"NTE3Njg5Nj...\", \"email\": \"[email protected]\" }, \"registry.redhat.io\": { \"auth\": \"NTE3Njg5Nj...\", \"email\": \"[email protected]\" } } }", "export OCP_RELEASE=<release_version>", "LOCAL_REGISTRY='<local_registry_host_name>:<local_registry_host_port>'", "LOCAL_REPOSITORY='<local_repository_name>'", "LOCAL_RELEASE_IMAGES_REPOSITORY='<local_release_images_repository_name>'", "PRODUCT_REPO='openshift-release-dev'", "LOCAL_SECRET_JSON='<path_to_pull_secret>'", "RELEASE_NAME=\"ocp-release\"", "ARCHITECTURE=<cluster_architecture> 1", "REMOVABLE_MEDIA_PATH=<path> 1", "oc adm release mirror -a USD{LOCAL_SECRET_JSON} --to-dir=USD{REMOVABLE_MEDIA_PATH}/mirror quay.io/USD{PRODUCT_REPO}/USD{RELEASE_NAME}:USD{OCP_RELEASE}-USD{ARCHITECTURE} --dry-run", "oc adm release mirror -a USD{LOCAL_SECRET_JSON} --to-dir=USD{REMOVABLE_MEDIA_PATH}/mirror quay.io/USD{PRODUCT_REPO}/USD{RELEASE_NAME}:USD{OCP_RELEASE}-USD{ARCHITECTURE}", "oc image mirror -a USD{LOCAL_SECRET_JSON} --from-dir=USD{REMOVABLE_MEDIA_PATH}/mirror \"file://openshift/release:USD{OCP_RELEASE}*\" USD{LOCAL_REGISTRY}/USD{LOCAL_REPOSITORY} 1", "oc apply -f USD{REMOVABLE_MEDIA_PATH}/mirror/config/<image_signature_file> 1", "oc image mirror -a USD{LOCAL_SECRET_JSON} USD{LOCAL_REGISTRY}/USD{LOCAL_REPOSITORY}:USD{OCP_RELEASE}-USD{ARCHITECTURE} USD{LOCAL_REGISTRY}/USD{LOCAL_RELEASE_IMAGES_REPOSITORY}:USD{OCP_RELEASE}-USD{ARCHITECTURE}", "oc adm release mirror -a USD{LOCAL_SECRET_JSON} --from=quay.io/USD{PRODUCT_REPO}/USD{RELEASE_NAME}:USD{OCP_RELEASE}-USD{ARCHITECTURE} --to=USD{LOCAL_REGISTRY}/USD{LOCAL_REPOSITORY} --apply-release-image-signature", "oc image mirror -a USD{LOCAL_SECRET_JSON} USD{LOCAL_REGISTRY}/USD{LOCAL_REPOSITORY}:USD{OCP_RELEASE}-USD{ARCHITECTURE} USD{LOCAL_REGISTRY}/USD{LOCAL_RELEASE_IMAGES_REPOSITORY}:USD{OCP_RELEASE}-USD{ARCHITECTURE}", "apiVersion: v1 kind: ConfigMap metadata: name: my-registry-ca data: updateservice-registry: | 1 -----BEGIN CERTIFICATE----- -----END CERTIFICATE----- registry-with-port.example.com..5000: | 2 -----BEGIN CERTIFICATE----- -----END CERTIFICATE-----", "oc get secret/pull-secret -n openshift-config --template='{{index .data \".dockerconfigjson\" | base64decode}}' ><pull_secret_location> 1", "oc registry login --registry=\"<registry>\" \\ 1 --auth-basic=\"<username>:<password>\" \\ 2 --to=<pull_secret_location> 3", "oc set data secret/pull-secret 
-n openshift-config --from-file=.dockerconfigjson=<pull_secret_location> 1", "apiVersion: v1 kind: Namespace metadata: name: openshift-update-service annotations: openshift.io/node-selector: \"\" labels: openshift.io/cluster-monitoring: \"true\" 1", "oc create -f <filename>.yaml", "oc create -f update-service-namespace.yaml", "apiVersion: operators.coreos.com/v1 kind: OperatorGroup metadata: name: update-service-operator-group spec: targetNamespaces: - openshift-update-service", "oc -n openshift-update-service create -f <filename>.yaml", "oc -n openshift-update-service create -f update-service-operator-group.yaml", "apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: name: update-service-subscription spec: channel: v1 installPlanApproval: \"Automatic\" source: \"redhat-operators\" 1 sourceNamespace: \"openshift-marketplace\" name: \"cincinnati-operator\"", "oc create -f <filename>.yaml", "oc -n openshift-update-service create -f update-service-subscription.yaml", "oc -n openshift-update-service get clusterserviceversions", "NAME DISPLAY VERSION REPLACES PHASE update-service-operator.v4.6.0 OpenShift Update Service 4.6.0 Succeeded", "FROM registry.access.redhat.com/ubi9/ubi:latest RUN curl -L -o cincinnati-graph-data.tar.gz https://api.openshift.com/api/upgrades_info/graph-data RUN mkdir -p /var/lib/cincinnati-graph-data && tar xvzf cincinnati-graph-data.tar.gz -C /var/lib/cincinnati-graph-data/ --no-overwrite-dir --no-same-owner CMD [\"/bin/bash\", \"-c\" ,\"exec cp -rp /var/lib/cincinnati-graph-data/* /var/lib/cincinnati/graph-data\"]", "podman build -f ./Dockerfile -t registry.example.com/openshift/graph-data:latest", "podman push registry.example.com/openshift/graph-data:latest", "NAMESPACE=openshift-update-service", "NAME=service", "RELEASE_IMAGES=registry.example.com/ocp4/openshift4-release-images", "GRAPH_DATA_IMAGE=registry.example.com/openshift/graph-data:latest", "oc -n \"USD{NAMESPACE}\" create -f - <<EOF apiVersion: updateservice.operator.openshift.io/v1 kind: UpdateService metadata: name: USD{NAME} spec: replicas: 2 releases: USD{RELEASE_IMAGES} graphDataImage: USD{GRAPH_DATA_IMAGE} EOF", "while sleep 1; do POLICY_ENGINE_GRAPH_URI=\"USD(oc -n \"USD{NAMESPACE}\" get -o jsonpath='{.status.policyEngineURI}/api/upgrades_info/v1/graph{\"\\n\"}' updateservice \"USD{NAME}\")\"; SCHEME=\"USD{POLICY_ENGINE_GRAPH_URI%%:*}\"; if test \"USD{SCHEME}\" = http -o \"USD{SCHEME}\" = https; then break; fi; done", "while sleep 10; do HTTP_CODE=\"USD(curl --header Accept:application/json --output /dev/stderr --write-out \"%{http_code}\" \"USD{POLICY_ENGINE_GRAPH_URI}?channel=stable-4.6\")\"; if test \"USD{HTTP_CODE}\" -eq 200; then break; fi; echo \"USD{HTTP_CODE}\"; done", "NAMESPACE=openshift-update-service", "NAME=service", "POLICY_ENGINE_GRAPH_URI=\"USD(oc -n \"USD{NAMESPACE}\" get -o jsonpath='{.status.policyEngineURI}/api/upgrades_info/v1/graph{\"\\n\"}' updateservice \"USD{NAME}\")\"", "PATCH=\"{\\\"spec\\\":{\\\"upstream\\\":\\\"USD{POLICY_ENGINE_GRAPH_URI}\\\"}}\"", "oc patch clusterversion version -p USDPATCH --type merge", "oc get machinehealthcheck -n openshift-machine-api", "oc -n openshift-machine-api annotate mhc <mhc-name> cluster.x-k8s.io/paused=\"\"", "apiVersion: machine.openshift.io/v1beta1 kind: MachineHealthCheck metadata: name: example namespace: openshift-machine-api annotations: cluster.x-k8s.io/paused: \"\" spec: selector: matchLabels: role: worker unhealthyConditions: - type: \"Ready\" status: \"Unknown\" timeout: \"300s\" - type: \"Ready\" status: 
\"False\" timeout: \"300s\" maxUnhealthy: \"40%\" status: currentHealthy: 5 expectedMachines: 5", "oc -n openshift-machine-api annotate mhc <mhc-name> cluster.x-k8s.io/paused-", "oc adm release info -o 'jsonpath={.digest}{\"\\n\"}' quay.io/openshift-release-dev/ocp-release:USD{OCP_RELEASE_VERSION}-USD{ARCHITECTURE}", "sha256:a8bfba3b6dddd1a2fbbead7dac65fe4fb8335089e4e7cae327f3bad334add31d", "oc adm upgrade --allow-explicit-upgrade --to-image <defined_registry>/<defined_repository>@<digest>", "oc adm migrate icsp <file_name>.yaml <file_name>.yaml <file_name>.yaml --dest-dir <path_to_the_directory>", "oc adm migrate icsp icsp.yaml icsp-2.yaml --dest-dir idms-files", "wrote ImageDigestMirrorSet to idms-files/imagedigestmirrorset_ubi8repo.5911620242173376087.yaml wrote ImageDigestMirrorSet to idms-files/imagedigestmirrorset_ubi9repo.6456931852378115011.yaml", "oc create -f <path_to_the_directory>/<file-name>.yaml", "oc adm catalog mirror <local_registry>/<pull_spec> <local_registry> -a <pull_secret_file> --icsp-scope=registry", "oc apply -f imageContentSourcePolicy.yaml", "oc get ImageContentSourcePolicy -o yaml", "apiVersion: v1 items: - apiVersion: operator.openshift.io/v1alpha1 kind: ImageContentSourcePolicy metadata: annotations: kubectl.kubernetes.io/last-applied-configuration: | {\"apiVersion\":\"operator.openshift.io/v1alpha1\",\"kind\":\"ImageContentSourcePolicy\",\"metadata\":{\"annotations\":{},\"name\":\"redhat-operator-index\"},\"spec\":{\"repositoryDigestMirrors\":[{\"mirrors\":[\"local.registry:5000\"],\"source\":\"registry.redhat.io\"}]}}", "oc get updateservice -n openshift-update-service", "NAME AGE service 6s", "oc delete updateservice service -n openshift-update-service", "updateservice.updateservice.operator.openshift.io \"service\" deleted", "oc project openshift-update-service", "Now using project \"openshift-update-service\" on server \"https://example.com:6443\".", "oc get operatorgroup", "NAME AGE openshift-update-service-fprx2 4m41s", "oc delete operatorgroup openshift-update-service-fprx2", "operatorgroup.operators.coreos.com \"openshift-update-service-fprx2\" deleted", "oc get subscription", "NAME PACKAGE SOURCE CHANNEL update-service-operator update-service-operator updateservice-index-catalog v1", "oc get subscription update-service-operator -o yaml | grep \" currentCSV\"", "currentCSV: update-service-operator.v0.0.1", "oc delete subscription update-service-operator", "subscription.operators.coreos.com \"update-service-operator\" deleted", "oc delete clusterserviceversion update-service-operator.v0.0.1", "clusterserviceversion.operators.coreos.com \"update-service-operator.v0.0.1\" deleted" ]
https://docs.redhat.com/en/documentation/openshift_container_platform/4.13/html/updating_clusters/updating-a-cluster-in-a-disconnected-environment
Chapter 9. Message delivery
Chapter 9. Message delivery 9.1. Sending messages To send a message, override the on_sendable event handler and call the sender::send() method. The sendable event fires when the proton::sender has enough credit to send at least one message. Example: Sending messages struct example_handler : public proton::messaging_handler { void on_container_start(proton::container& cont) override { proton::connection conn = cont.connect("amqp://example.com"); conn.open_sender("jobs"); } void on_sendable(proton::sender& snd) override { proton::message msg {"job-1"}; snd.send(msg); } }; 9.2. Tracking sent messages When a message is sent, the sender can keep a reference to the tracker object representing the transfer. The receiver accepts or rejects each message that is delivered. The sender is notified of the outcome for each tracked delivery. To monitor the outcome of a sent message, override the on_tracker_accept and on_tracker_reject event handlers and map the delivery state update to the tracker returned from send() . Example: Tracking sent messages void on_sendable(proton::sender& snd) override { proton::message msg {"job-1"}; proton::tracker trk = snd.send(msg); } void on_tracker_accept(proton::tracker& trk) override { std::cout << "Delivery for " << trk << " is accepted\n"; } void on_tracker_reject(proton::tracker& trk) override { std::cout << "Delivery for " << trk << " is rejected\n"; } 9.3. Receiving messages To receive messages, create a receiver and override the on_message event handler. Example: Receiving messages struct example_handler : public proton::messaging_handler { void on_container_start(proton::container& cont) override { proton::connection conn = cont.connect("amqp://example.com"); conn.open_receiver("jobs"); } void on_message(proton::delivery& dlv, proton::message& msg) override { std::cout << "Received message '" << msg.body() << "'\n"; } }; 9.4. Acknowledging received messages To explicitly accept or reject a delivery, use the delivery::accept() or delivery::reject() methods in the on_message event handler. Example: Acknowledging received messages void on_message(proton::delivery& dlv, proton::message& msg) override { try { process_message(msg); dlv.accept(); } catch (std::exception& e) { dlv.reject(); } } By default, if you do not explicitly acknowledge a delivery, then the library accepts it after on_message returns. To disable this behavior, set the auto_accept receiver option to false.
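The following fragment is an illustrative sketch of one way to apply that option; it mirrors the receiving example above and is not taken verbatim from this guide:
struct example_handler : public proton::messaging_handler {
    void on_container_start(proton::container& cont) override {
        proton::connection conn = cont.connect("amqp://example.com");
        // Disable automatic acceptance so the application must settle each delivery itself
        conn.open_receiver("jobs", proton::receiver_options().auto_accept(false));
    }
    void on_message(proton::delivery& dlv, proton::message& msg) override {
        std::cout << "Received message '" << msg.body() << "'\n";
        dlv.accept(); // explicit acknowledgment is now required
    }
};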
[ "struct example_handler : public proton::messaging_handler { void on_container_start(proton::container& cont) override { proton::connection conn = cont.connect(\"amqp://example.com\"); conn.open_sender(\"jobs\"); } void on_sendable(proton::sender& snd) override { proton::message msg {\"job-1\"}; snd.send(msg); } };", "void on_sendable(proton::sender& snd) override { proton::message msg {\"job-1\"}; proton::tracker trk = snd.send(msg); } void on_tracker_accept(proton::tracker& trk) override { std::cout << \"Delivery for \" << trk << \" is accepted\\n\"; } void on_tracker_reject(proton::tracker& trk) override { std::cout << \"Delivery for \" << trk << \" is rejected\\n\"; }", "struct example_handler : public proton::messaging_handler { void on_container_start(proton::container& cont) override { proton::connection conn = cont.connect(\"amqp://example.com\"); conn.open_receiver(\"jobs\") ; } void on_message(proton::delivery& dlv, proton::message& msg) override { std::cout << \"Received message '\" << msg.body() << \"'\\n\"; } };", "void on_message(proton::delivery& dlv, proton::message& msg) override { try { process_message(msg); dlv.accept(); } catch (std::exception& e) { dlv.reject(); } }" ]
https://docs.redhat.com/en/documentation/red_hat_amq/2021.q1/html/using_the_amq_cpp_client/message_delivery
Chapter 6. EgressQoS [k8s.ovn.org/v1]
Chapter 6. EgressQoS [k8s.ovn.org/v1] Description EgressQoS is a CRD that allows the user to define a DSCP value for pods egress traffic on its namespace to specified CIDRs. Traffic from these pods will be checked against each EgressQoSRule in the namespace's EgressQoS, and if there is a match the traffic is marked with the relevant DSCP value. Type object 6.1. Specification Property Type Description apiVersion string APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources kind string Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds metadata ObjectMeta Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata spec object EgressQoSSpec defines the desired state of EgressQoS status object EgressQoSStatus defines the observed state of EgressQoS 6.1.1. .spec Description EgressQoSSpec defines the desired state of EgressQoS Type object Required egress Property Type Description egress array a collection of Egress QoS rule objects egress[] object 6.1.2. .spec.egress Description a collection of Egress QoS rule objects Type array 6.1.3. .spec.egress[] Description Type object Required dscp Property Type Description dscp integer DSCP marking value for matching pods' traffic. dstCIDR string DstCIDR specifies the destination's CIDR. Only traffic heading to this CIDR will be marked with the DSCP value. This field is optional, and in case it is not set the rule is applied to all egress traffic regardless of the destination. podSelector object PodSelector applies the QoS rule only to the pods in the namespace whose label matches this definition. This field is optional, and in case it is not set results in the rule being applied to all pods in the namespace. 6.1.4. .spec.egress[].podSelector Description PodSelector applies the QoS rule only to the pods in the namespace whose label matches this definition. This field is optional, and in case it is not set results in the rule being applied to all pods in the namespace. Type object Property Type Description matchExpressions array matchExpressions is a list of label selector requirements. The requirements are ANDed. matchExpressions[] object A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. matchLabels object (string) matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 6.1.5. .spec.egress[].podSelector.matchExpressions Description matchExpressions is a list of label selector requirements. The requirements are ANDed. Type array 6.1.6. .spec.egress[].podSelector.matchExpressions[] Description A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. Type object Required key operator Property Type Description key string key is the label key that the selector applies to. 
operator string operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. values array (string) values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 6.1.7. .status Description EgressQoSStatus defines the observed state of EgressQoS Type object 6.2. API endpoints The following API endpoints are available: /apis/k8s.ovn.org/v1/egressqoses GET : list objects of kind EgressQoS /apis/k8s.ovn.org/v1/namespaces/{namespace}/egressqoses DELETE : delete collection of EgressQoS GET : list objects of kind EgressQoS POST : create an EgressQoS /apis/k8s.ovn.org/v1/namespaces/{namespace}/egressqoses/{name} DELETE : delete an EgressQoS GET : read the specified EgressQoS PATCH : partially update the specified EgressQoS PUT : replace the specified EgressQoS /apis/k8s.ovn.org/v1/namespaces/{namespace}/egressqoses/{name}/status GET : read status of the specified EgressQoS PATCH : partially update status of the specified EgressQoS PUT : replace status of the specified EgressQoS 6.2.1. /apis/k8s.ovn.org/v1/egressqoses HTTP method GET Description list objects of kind EgressQoS Table 6.1. HTTP responses HTTP code Reponse body 200 - OK EgressQoSList schema 401 - Unauthorized Empty 6.2.2. /apis/k8s.ovn.org/v1/namespaces/{namespace}/egressqoses HTTP method DELETE Description delete collection of EgressQoS Table 6.2. HTTP responses HTTP code Reponse body 200 - OK Status schema 401 - Unauthorized Empty HTTP method GET Description list objects of kind EgressQoS Table 6.3. HTTP responses HTTP code Reponse body 200 - OK EgressQoSList schema 401 - Unauthorized Empty HTTP method POST Description create an EgressQoS Table 6.4. Query parameters Parameter Type Description dryRun string When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed fieldValidation string fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. Table 6.5. Body parameters Parameter Type Description body EgressQoS schema Table 6.6. HTTP responses HTTP code Reponse body 200 - OK EgressQoS schema 201 - Created EgressQoS schema 202 - Accepted EgressQoS schema 401 - Unauthorized Empty 6.2.3. /apis/k8s.ovn.org/v1/namespaces/{namespace}/egressqoses/{name} Table 6.7. 
Global path parameters Parameter Type Description name string name of the EgressQoS HTTP method DELETE Description delete an EgressQoS Table 6.8. Query parameters Parameter Type Description dryRun string When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed Table 6.9. HTTP responses HTTP code Reponse body 200 - OK Status schema 202 - Accepted Status schema 401 - Unauthorized Empty HTTP method GET Description read the specified EgressQoS Table 6.10. HTTP responses HTTP code Reponse body 200 - OK EgressQoS schema 401 - Unauthorized Empty HTTP method PATCH Description partially update the specified EgressQoS Table 6.11. Query parameters Parameter Type Description dryRun string When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed fieldValidation string fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. Table 6.12. HTTP responses HTTP code Reponse body 200 - OK EgressQoS schema 401 - Unauthorized Empty HTTP method PUT Description replace the specified EgressQoS Table 6.13. Query parameters Parameter Type Description dryRun string When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed fieldValidation string fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. Table 6.14. 
Body parameters Parameter Type Description body EgressQoS schema Table 6.15. HTTP responses HTTP code Reponse body 200 - OK EgressQoS schema 201 - Created EgressQoS schema 401 - Unauthorized Empty 6.2.4. /apis/k8s.ovn.org/v1/namespaces/{namespace}/egressqoses/{name}/status Table 6.16. Global path parameters Parameter Type Description name string name of the EgressQoS HTTP method GET Description read status of the specified EgressQoS Table 6.17. HTTP responses HTTP code Reponse body 200 - OK EgressQoS schema 401 - Unauthorized Empty HTTP method PATCH Description partially update status of the specified EgressQoS Table 6.18. Query parameters Parameter Type Description dryRun string When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed fieldValidation string fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. Table 6.19. HTTP responses HTTP code Reponse body 200 - OK EgressQoS schema 401 - Unauthorized Empty HTTP method PUT Description replace status of the specified EgressQoS Table 6.20. Query parameters Parameter Type Description dryRun string When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed fieldValidation string fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. Table 6.21. Body parameters Parameter Type Description body EgressQoS schema Table 6.22. 
HTTP responses HTTP code Reponse body 200 - OK EgressQoS schema 201 - Created EgressQoS schema 401 - Unauthorized Empty
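To make the preceding schema concrete, the following is a minimal example manifest; the namespace, DSCP values, destination CIDR, and pod label are illustrative placeholders, and the sketch assumes the convention that the EgressQoS resource in a namespace is named default:
apiVersion: k8s.ovn.org/v1
kind: EgressQoS
metadata:
  name: default
  namespace: example-namespace
spec:
  egress:
  # Mark traffic from all pods in the namespace to 10.0.0.0/24 with DSCP 46
  - dscp: 46
    dstCIDR: 10.0.0.0/24
  # Mark all egress traffic from pods labeled app=example with DSCP 30
  - dscp: 30
    podSelector:
      matchLabels:
        app: example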
null
https://docs.redhat.com/en/documentation/openshift_container_platform/4.15/html/network_apis/egressqos-k8s-ovn-org-v1
Chapter 1. JBoss EAP Security Domains
Chapter 1. JBoss EAP Security Domains 1.1. About Security Domains Security domains are part of the JBoss EAP 6 security subsystem. All security configuration is now managed centrally, by the domain controller of a managed domain, or by the standalone server. A security domain consists of configurations for authentication, authorization, security mapping, and auditing. It implements Java Authentication and Authorization Service (JAAS) declarative security. Authentication refers to verifying the identity of a user. In security terminology, this user is referred to as a principal . Although authentication and authorization are different, many of the included authentication modules also handle authorization. Authorization is a process by which the server determines if an authenticated user has permission or privileges to access specific resources in the system or operation. Security mapping refers to the ability to add, modify, or delete information from a principal, role, or attribute before passing the information to your application. The auditing manager allows you to configure provider modules to control the way that security events are reported. If you use security domains, you can remove all specific security configuration from your application itself. This allows you to change security parameters centrally. One common scenario that benefits from this type of configuration structure is the process of moving applications between testing and production environments.
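For orientation, a security domain is declared in the security subsystem of the server configuration ( standalone.xml or domain.xml ). The following is an illustrative sketch only; the domain name, login module, and properties files are placeholders and are not prescribed by this guide:
<security-domain name="example-domain" cache-type="default">
    <authentication>
        <!-- UsersRoles is a simple properties-file based login module -->
        <login-module code="UsersRoles" flag="required">
            <module-option name="usersProperties" value="users.properties"/>
            <module-option name="rolesProperties" value="roles.properties"/>
        </login-module>
    </authentication>
</security-domain>
An application then refers to the domain by its name, which is what makes it possible to change the security parameters centrally without touching the application itself.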
null
https://docs.redhat.com/en/documentation/red_hat_jboss_data_virtualization/6.4/html/security_guide/chap-jboss_eap_security_domains
Chapter 7. Using Hibernate with JBoss Data Virtualization
Chapter 7. Using Hibernate with JBoss Data Virtualization 7.1. Configure Hibernate for Use with JBoss Data Virtualization Prerequisites You must have the JBoss Data Virtualization JDBC API client JAR file ( teiid-client.jar ) and the JBoss Data Virtualization hibernate dialect JAR file ( teiid-hibernate-dialect- VERSION .jar ) in Hibernate's classpath. These files are found in EAP_HOME /modules/system/layers/dv/org/jboss/teiid/client/main/ . These are required for the org.teiid.dialect.TeiidDialect , org.teiid.jdbc.TeiidDriver and org.teiid.jdbc.TeiidDataSource classes. Procedure 7.1. Configure Hibernate for Use with JBoss Data Virtualization Open the Hibernate configuration file Open the hibernate.cfg.xml file. Specify the JBoss Data Virtualization driver class Specify the JBoss Data Virtualization driver class in the connection.driver_class property: Set the Connection URL Specify the URL for the VDB in the connection.url property: Note Be sure to use a local connection if Hibernate is in the same VM as the application server. . Specify the dialect class Specify the JBoss Data Virtualization dialect class in the dialect property: Note Alternatively, the connection properties can be added to the hibernate.properties file instead of hibernate.cfg.xml : Note Since your VDBs will likely contain multiple source and view models with identical table names, you will need to fully qualify table names specified in Hibernate mapping files: For example:
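Pulling the preceding steps together, a minimal hibernate.cfg.xml might look similar to the following sketch; the VDB name, host, port, credentials, and mapping resource are placeholders:
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE hibernate-configuration PUBLIC
    "-//Hibernate/Hibernate Configuration DTD 3.0//EN"
    "http://www.hibernate.org/dtd/hibernate-configuration-3.0.dtd">
<hibernate-configuration>
    <session-factory>
        <!-- JBoss Data Virtualization driver, connection URL, and dialect -->
        <property name="connection.driver_class">org.teiid.jdbc.TeiidDriver</property>
        <property name="connection.url">jdbc:teiid:VDB-NAME@mm://HOST:PORT;user=USERNAME;password=PASSWORD</property>
        <property name="dialect">org.teiid.dialect.TeiidDialect</property>
        <!-- Mapping files must use fully qualified table names -->
        <mapping resource="org/teiid/example/Publisher.hbm.xml"/>
    </session-factory>
</hibernate-configuration>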
[ "<property name=\"connection.driver_class\"> org.teiid.jdbc.TeiidDriver </property>", "<property name=\"connection.url\"> jdbc:teiid:VDB-NAME@mm://HOST:PORT;user=USERNAME;password=PASSWORD </property>", "<property name=\"dialect\"> org.teiid.dialect.TeiidDialect </property>", "hibernate.connection.driver_class=org.teiid.jdbc.TeiidDriver hibernate.connection.url=jdbc:teiid:VDB-NAME@mm://HOST:PORT hibernate.connection.username=USERNAME hibernate.connection.password=PASSWORD hibernate.dialect=org.teiid.dialect.TeiidDialect", "<class name=\"CLASSNAME\" table=\"SOURCE/VIEW_MODEL_NAME.[SCHEMA_NAME.]TABLENAME\"> </class>", "<class name=\"org.teiid.example.Publisher\" table=\"BOOKS.BOOKS.PUBLISHERS\"> </class>" ]
https://docs.redhat.com/en/documentation/red_hat_jboss_data_virtualization/6.4/html/development_guide_volume_1_client_development/chap-using_hibernate_with_jboss_data_virtualization
Chapter 17. Configuring time-based account lockout policies
Chapter 17. Configuring time-based account lockout policies You can use the Account Policy plug-in to configure different time-based lockout policies, such as: Automatically disabling accounts a certain amount of time after the last successful login Automatically disabling accounts a certain amount of time after you created them Automatically disabling accounts a certain amount of time after password expiry Automatically disabling account on both account inactivity and password expiration 17.1. Automatically disabling accounts a certain amount of time after the last successful login Follow this procedure to configure a time-based lockout policy that inactivates users under the dc=example,dc=com entry who do not log in for more than 21 days. Use the account inactivity feature to ensure, for example if an employee left the company and the administrator forgets to delete the account, that Directory Server inactivates the account after a certain amount of time. Procedure Enable the Account Policy plug-in: # dsconf -D " cn=Directory Manager " ldap://server.example.com plugin account-policy enable Configure the plug-in configuration entry: # dsconf -D " cn=Directory Manager " ldap://server.example.com plugin account-policy config-entry set " cn=config,cn=Account Policy Plugin,cn=plugins,cn=config " --always-record-login yes --state-attr lastLoginTime --alt-state-attr 1.1 --spec-attr acctPolicySubentry --limit-attr accountInactivityLimit This command uses the following options: --always-record-login yes : Enables logging of the login time. This is required to use Class of Service (CoS) or roles with account policies, even if it does not have the acctPolicySubentry attribute set. --state-attr lastLoginTime : Configures that the Account Policy plug-in stores the last login time in the lastLoginTime attribute of users. --alt-state-attr 1.1 : Disables using an alternative attribute to check if the primary one does not exist. By default, Directory Server uses the createTimestamp attribute as alternative. However, this causes that Directory Server logs out existing users automatically if their accounts do not have the lastLoginTime attribute set and createTimestamp is older than the configured inactivity period. Disabling the alternative attribute causes that Directory Server automatically adds the lastLoginTime attribute to user entries when they log in the next time. --spec-attr acctPolicySubentry : Configures Directory Server to apply the policy to entries that have the acctPolicySubentry attribute set. You configure this attribute in the CoS entry. --limit-attr accountInactivityLimit : Configures that the accountInactivityLimit attribute in the account inactivation policy entry stores the inactivity time. Restart the instance: # dsctl instance_name restart Create the account inactivation policy entry: # ldapadd -D " cn=Directory Manager " -W -H ldap://server.example.com -x dn: cn=Account Inactivation Policy,dc=example,dc=com objectClass: top objectClass: ldapsubentry objectClass: extensibleObject objectClass: accountpolicy accountInactivityLimit: 1814400 cn: Account Inactivation Policy The value in the accountInactivityLimit attribute configures that Directory Server inactivates accounts 1814400 seconds (21 days) after the last log in.
Create the CoS template entry: # ldapadd -D " cn=Directory Manager " -W -H ldap://server.example.com -x dn: cn=TemplateCoS,dc=example,dc=com objectClass: top objectClass: ldapsubentry objectClass: extensibleObject objectClass: cosTemplate acctPolicySubentry: cn=Account Inactivation Policy,dc=example,dc=com This template entry references the account inactivation policy. Create the CoS definition entry: # ldapadd -D " cn=Directory Manager " -W -H ldap://server.example.com -x dn: cn=DefinitionCoS,dc=example,dc=com objectClass: top objectClass: ldapsubentry objectclass: cosSuperDefinition objectclass: cosPointerDefinition cosTemplateDn: cn=TemplateCoS,dc=example,dc=com cosAttribute: acctPolicySubentry default operational-default This definition entry references the CoS template entry and causes that the acctPolicySubentry attribute appears in each user entry with a value set to cn=Account Inactivation Policy,dc=example,dc=com . Verification Set the lastLoginTime attribute of a user to a value that is older than the inactivity time you configured: # ldapmodify -H ldap://server.example.com -x -D " cn=Directory Manager " -W dn: uid=example,ou=People,dc=example,dc=com changetype: modify replace: lastLoginTime lastLoginTime: 20210101000000Z Try to connect to the directory as a this user: # ldapsearch -H ldap://server.example.com -x -D " uid=example,ou=People,dc=example,dc=com " -W -b " dc=example,dc=com " ldap_bind: Constraint violation (19) additional info: Account inactivity limit exceeded. Contact system administrator to reset. If Directory Server denies access and returns this error, account inactivity works. Additional resources Re-enabling accounts that reached the inactivity limit 17.2. Automatically disabling accounts a certain amount of time after you created them Follow this procedure to configure that accounts in the dc=example,dc=com entry expire 60 days after the administrator created them. Use the account expiration feature, for example, to ensure that accounts for external workers are locked a certain amount of time after they have been created. Procedure Enable the Account Policy plug-in: # dsconf -D " cn=Directory Manager " ldap://server.example.com plugin account-policy enable Configure the plug-in configuration entry: # dsconf -D " cn=Directory Manager " ldap://server.example.com plugin account-policy config-entry set " cn=config,cn=Account Policy Plugin,cn=plugins,cn=config " --always-record-login yes --state-attr createTimestamp --alt-state-attr 1.1 --spec-attr acctPolicySubentry --limit-attr accountInactivityLimit This command uses the following options: --always-record-login yes : Enables logging of the login time. This is required to use Class of Service (CoS) or roles with account policies, even if it does not have the acctPolicySubentry attribute set. --state-attr createTimestamp : Configures that the Account Policy plug-in uses the value of the createTimestamp attribute to calculate whether an account is expired. --alt-state-attr 1.1 : Disables using an alternative attribute to check if the primary one does not exist. --spec-attr acctPolicySubentry : Configures Directory Server to apply the policy to entries that have the acctPolicySubentry attribute set. You configure this attribute in the CoS entry. --limit-attr accountInactivityLimit : Configures that the accountInactivityLimit attribute in the account expiration policy entry stores the maximum age. 
Restart the instance: # dsctl instance_name restart Create the account expiration policy entry: # ldapadd -D " cn=Directory Manager " -W -H ldap://server.example.com -x dn: cn=Account Expiration Policy,dc=example,dc=com objectClass: top objectClass: ldapsubentry objectClass: extensibleObject objectClass: accountpolicy accountInactivityLimit: 5184000 cn: Account Expiration Policy The value in the accountInactivityLimit attribute configures accounts to expire 5184000 seconds (60 days) after they have been created. Create the CoS template entry: # ldapadd -D " cn=Directory Manager " -W -H ldap://server.example.com -x dn: cn=TemplateCoS,dc=example,dc=com objectClass: top objectClass: ldapsubentry objectClass: extensibleObject objectClass: cosTemplate acctPolicySubentry: cn=Account Expiration Policy,dc=example,dc=com This template entry references the account expiration policy. Create the CoS definition entry: # ldapadd -D " cn=Directory Manager " -W -H ldap://server.example.com -x dn: cn=DefinitionCoS,dc=example,dc=com objectClass: top objectClass: ldapsubentry objectclass: cosSuperDefinition objectclass: cosPointerDefinition cosTemplateDn: cn=TemplateCoS,dc=example,dc=com cosAttribute: acctPolicySubentry default operational-default This definition entry references the CoS template entry and causes the acctPolicySubentry attribute to appear in each user entry with a value set to cn=Account Expiration Policy,dc=example,dc=com . Verification Try to connect to the directory as a user stored in the dc=example,dc=com entry whose createTimestamp attribute is set to a value more than 60 days ago: # ldapsearch -H ldap://server.example.com -x -D " uid=example,dc=example,dc=com " -W -b " dc=example,dc=com " ldap_bind: Constraint violation (19) additional info: Account inactivity limit exceeded. Contact system administrator to reset. If Directory Server denies access and returns this error, account expiration works. Additional resources Re-enabling accounts that reached the inactivity limit 17.3. Automatically disabling accounts a certain amount of time after password expiry Follow this procedure to configure a time-based lockout policy that inactivates users under the dc=example,dc=com entry who do not change their password for more than 28 days. Prerequisites Users must have the passwordExpirationTime attribute set in their entry. Procedure Enable the password expiration feature: # dsconf -D " cn=Directory Manager " ldap://server.example.com config replace passwordExp=on Enable the Account Policy plug-in: # dsconf -D " cn=Directory Manager " ldap://server.example.com plugin account-policy enable Configure the plug-in configuration entry: # dsconf -D " cn=Directory Manager " ldap://server.example.com plugin account-policy config-entry set " cn=config,cn=Account Policy Plugin,cn=plugins,cn=config " --always-record-login yes --always-record-login-attr lastLoginTime --state-attr non_existent_attribute --alt-state-attr passwordExpirationTime --spec-attr acctPolicySubentry --limit-attr accountInactivityLimit This command uses the following options: --always-record-login yes : Enables logging of the login time. This is required to use Class of Service (CoS) or roles with account policies, even if an entry does not have the acctPolicySubentry attribute set. --always-record-login-attr lastLoginTime : Configures the Account Policy plug-in to store the last login time in the lastLoginTime attribute of users.
--state-attr non_existent_attribute : Sets the primary time attribute used to evaluate an account policy to a non-existent dummy attribute name. --alt-state-attr passwordExpirationTime : Configures the plug-in to use the passwordExpirationTime attribute as the alternative attribute to check. --spec-attr acctPolicySubentry : Configures Directory Server to apply the policy to entries that have the acctPolicySubentry attribute set. You configure this attribute in the CoS entry. --limit-attr accountInactivityLimit : Configures the accountInactivityLimit attribute in the account policy entry to store the time after which accounts are inactivated following their last password change. Restart the instance: # dsctl instance_name restart Create the account inactivation policy entry: # ldapadd -D " cn=Directory Manager " -W -H ldap://server.example.com -x dn: cn=Account Inactivation Policy,dc=example,dc=com objectClass: top objectClass: ldapsubentry objectClass: extensibleObject objectClass: accountpolicy accountInactivityLimit: 2419200 cn: Account Inactivation Policy The value in the accountInactivityLimit attribute configures Directory Server to inactivate accounts 2419200 seconds (28 days) after the password was changed. Create the CoS template entry: # ldapadd -D " cn=Directory Manager " -W -H ldap://server.example.com -x dn: cn=TemplateCoS,dc=example,dc=com objectClass: top objectClass: ldapsubentry objectClass: extensibleObject objectClass: cosTemplate acctPolicySubentry: cn=Account Inactivation Policy,dc=example,dc=com This template entry references the account inactivation policy. Create the CoS definition entry: # ldapadd -D " cn=Directory Manager " -W -H ldap://server.example.com -x dn: cn=DefinitionCoS,dc=example,dc=com objectClass: top objectClass: ldapsubentry objectclass: cosSuperDefinition objectclass: cosPointerDefinition cosTemplateDn: cn=TemplateCoS,dc=example,dc=com cosAttribute: acctPolicySubentry default operational-default This definition entry references the CoS template entry and causes the acctPolicySubentry attribute to appear in each user entry with a value set to cn=Account Inactivation Policy,dc=example,dc=com . Verification Set the passwordExpirationTime attribute of a user to a value that is older than the inactivity time you configured: # ldapmodify -H ldap://server.example.com -x -D " cn=Directory Manager " -W dn: uid=example,ou=People,dc=example,dc=com changetype: modify replace: passwordExpirationTime passwordExpirationTime: 20210101000000Z Try to connect to the directory as this user: # ldapsearch -H ldap://server.example.com -x -D " uid=example,ou=People,dc=example,dc=com " -W -b " dc=example,dc=com " ldap_bind: Constraint violation (19) additional info: Account inactivity limit exceeded. Contact system administrator to reset. If Directory Server denies access and returns this error, account inactivity works. Additional resources Re-enabling accounts that reached the inactivity limit 17.4. Automatically disabling accounts on both account inactivity and password expiration You can apply both account inactivity and password expiration when a user authenticates by using the checkAllStateAttrs setting. By default, when checkAllStateAttrs is not present in the plug-in configuration entry, or when you set this parameter to no , the plug-in checks for the state attribute lastLoginTime . If the attribute is not present in the entry, the plug-in checks the alternate state attribute.
You can set the main state attribute to a non-existent attribute and set the alternate state attribute to passwordExpirationTime when you want the plug-in to handle expiration based on the passwordExpirationTime attribute. When you enable this parameter, the plug-in checks the main state attribute, and if the account is fine, it then checks the alternate state attribute. This differs from the password policy's password expiration: the Account Policy plug-in completely disables the account if the passwordExpirationTime value exceeds the inactivity limit, whereas with password policy expiration the user can still log in and change their password. The Account Policy plug-in completely blocks the user from doing anything, and an administrator must reset the account. Procedure Create the plug-in configuration entry and enable the setting: Restart the server to load the new plug-in configuration: Warning The checkAllStateAttrs setting is designed to work only when the alternate state attribute is set to passwordExpirationTime . Setting it to createTimestamp can cause undesired results and entries might get locked out.
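To confirm that the plug-in configuration entry contains the settings you expect, you can, for example, read the entry back with an ldapsearch command similar to the following. This is an illustrative check and is not part of the documented procedure:
# ldapsearch -D "cn=Directory Manager" -W -H ldap://server.example.com -x -b "cn=config,cn=Account Policy Plugin,cn=plugins,cn=config"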
[ "dsconf -D \" cn=Directory Manager \" ldap://server.example.com plugin account-policy enable", "dsconf -D \" cn=Directory Manager \" ldap://server.example.com plugin account-policy config-entry set \" cn=config,cn=Account Policy Plugin,cn=plugins,cn=config \" --always-record-login yes --state-attr lastLoginTime --alt-state-attr 1.1 --spec-attr acctPolicySubentry --limit-attr accountInactivityLimit", "dsctl instance_name restart", "ldapadd -D \" cn=Directory Manager \" -W -H ldap://server.example.com -x dn: cn=Account Inactivation Policy,dc=example,dc=com objectClass: top objectClass: ldapsubentry objectClass: extensibleObject objectClass: accountpolicy accountInactivityLimit: 1814400 cn: Account Inactivation Policy", "ldapadd -D \" cn=Directory Manager \" -W -H ldap://server.example.com -x dn: cn=TemplateCoS,dc=example,dc=com objectClass: top objectClass: ldapsubentry objectClass: extensibleObject objectClass: cosTemplate acctPolicySubentry: cn=Account Inactivation Policy,dc=example,dc=com", "ldapadd -D \" cn=Directory Manager \" -W -H ldap://server.example.com -x dn: cn=DefinitionCoS,dc=example,dc=com objectClass: top objectClass: ldapsubentry objectclass: cosSuperDefinition objectclass: cosPointerDefinition cosTemplateDn: cn=TemplateCoS,dc=example,dc=com cosAttribute: acctPolicySubentry default operational-default", "ldapmodify -H ldap://server.example.com -x -D \" cn=Directory Manager \" -W dn: uid=example,ou=People,dc=example,dc=com changetype: modify replace: lastLoginTime lastLoginTime: 20210101000000Z", "ldapsearch -H ldap://server.example.com -x -D \" uid=example,ou=People,dc=example,dc=com \" -W -b \" dc=example,dc=com \" ldap_bind: Constraint violation (19) additional info: Account inactivity limit exceeded. Contact system administrator to reset.", "dsconf -D \" cn=Directory Manager \" ldap://server.example.com plugin account-policy enable", "dsconf -D \" cn=Directory Manager \" ldap://server.example.com plugin account-policy config-entry set \" cn=config,cn=Account Policy Plugin,cn=plugins,cn=config \" --always-record-login yes --state-attr createTimestamp --alt-state-attr 1.1 --spec-attr acctPolicySubentry --limit-attr accountInactivityLimit", "dsctl instance_name restart", "ldapadd -D \" cn=Directory Manager \" -W -H ldap://server.example.com -x dn: cn=Account Expiration Policy,dc=example,dc=com objectClass: top objectClass: ldapsubentry objectClass: extensibleObject objectClass: accountpolicy accountInactivityLimit: 5184000 cn: Account Expiration Policy", "ldapadd -D \" cn=Directory Manager \" -W -H ldap://server.example.com -x dn: cn=TemplateCoS,dc=example,dc=com objectClass: top objectClass: ldapsubentry objectClass: extensibleObject objectClass: cosTemplate acctPolicySubentry: cn=Account Expiration Policy,dc=example,dc=com", "ldapadd -D \" cn=Directory Manager \" -W -H ldap://server.example.com -x dn: cn=DefinitionCoS,dc=example,dc=com objectClass: top objectClass: ldapsubentry objectclass: cosSuperDefinition objectclass: cosPointerDefinition cosTemplateDn: cn=TemplateCoS,dc=example,dc=com cosAttribute: acctPolicySubentry default operational-default", "ldapsearch -H ldap://server.example.com -x -D \" uid=example,dc=example,dc=com \" -W -b \" dc=example,dc=com \" ldap_bind: Constraint violation (19) additional info: Account inactivity limit exceeded. 
Contact system administrator to reset.", "dsconf -D \" cn=Directory Manager \" ldap://server.example.com config replace passwordExp=on", "dsconf -D \" cn=Directory Manager \" ldap://server.example.com plugin account-policy enable", "dsconf -D \" cn=Directory Manager \" ldap://server.example.com plugin account-policy config-entry set \" cn=config,cn=Account Policy Plugin,cn=plugins,cn=config \" --always-record-login yes --always-record-login-attr lastLoginTime --state-attr non_existent_attribute --alt-state-attr passwordExpirationTime --spec-attr acctPolicySubentry --limit-attr accountInactivityLimit", "dsctl instance_name restart", "ldapadd -D \" cn=Directory Manager \" -W -H ldap://server.example.com -x dn: cn=Account Inactivation Policy,dc=example,dc=com objectClass: top objectClass: ldapsubentry objectClass: extensibleObject objectClass: accountpolicy accountInactivityLimit: 2419200 cn: Account Inactivation Policy", "ldapadd -D \" cn=Directory Manager \" -W -H ldap://server.example.com -x dn: cn=TemplateCoS,dc=example,dc=com objectClass: top objectClass: ldapsubentry objectClass: extensibleObject objectClass: cosTemplate acctPolicySubentry: cn=Account Inactivation Policy,dc=example,dc=com", "ldapadd -D \" cn=Directory Manager \" -W -H ldap://server.example.com -x dn: cn=DefinitionCoS,dc=example,dc=com objectClass: top objectClass: ldapsubentry objectclass: cosSuperDefinition objectclass: cosPointerDefinition cosTemplateDn: cn=TemplateCoS,dc=example,dc=com cosAttribute: acctPolicySubentry default operational-default", "ldapmodify -H ldap://server.example.com -x -D \" cn=Directory Manager \" -W dn: uid=example,ou=People,dc=example,dc=com changetype: modify replace: passwordExpirationTime passwordExpirationTime: 20210101000000Z", "ldapsearch -H ldap://server.example.com -x -D \" uid=example,ou=People,dc=example,dc=com \" -W -b \" dc=example,dc=com \" ldap_bind: Constraint violation (19) additional info: Account inactivity limit exceeded. Contact system administrator to reset.", "dsconf -D \"cn=Directory Manager\" ldap://server.example.com plugin account-policy config-entry set \"cn=config,cn=Account Policy Plugin,cn=plugins,cn=config\" --always-record-login yes --state-attr lastLoginTime --alt-state-attr 1.1 --spec-attr acctPolicySubentry --limit-attr accountInactivityLimit --check-all-state-attrs yes", "dsctl instance_name restart" ]
https://docs.redhat.com/en/documentation/red_hat_directory_server/12/html/securing_red_hat_directory_server/assembly_configuring-time-based-account-lockout-policies_securing-rhds
3.3. Editing an Image Builder blueprint with command-line interface
3.3. Editing an Image Builder blueprint with command-line interface This procedure describes how to edit an existing Image Builder blueprint in the command-line interface. Procedure 1. Save (export) the blueprint to a local text file: 2. Edit the BLUEPRINT-NAME.toml file with a text editor of your choice and make your changes. 3. Before finishing the edits, make sure the file is a valid blueprint: Remove this line, if present: Increase the version number. Remember that Image Builder blueprint versions must use the Semantic Versioning scheme. Note also that if you do not change the version, the patch component of the version is increased automatically. Check if the contents are valid TOML specifications. See the TOML documentation for more information. Note The TOML documentation is a community product and is not supported by Red Hat. You can report any issues with the tool at https://github.com/toml-lang/toml/issues 4. Save the file and close the editor. 5. Push (import) the blueprint back into Image Builder: Note that you must supply the file name including the .toml extension, while in other commands you use only the name of the blueprint. 6. To verify that the contents uploaded to Image Builder match your edits, list the contents of the blueprint: 7. Check whether the components and versions listed in the blueprint and their dependencies are valid:
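As an illustration, if your blueprint is named base-image (a hypothetical name), the complete edit cycle could look similar to the following:
composer-cli blueprints save base-image
vi base-image.toml
composer-cli blueprints push base-image.toml
composer-cli blueprints show base-image
composer-cli blueprints depsolve base-image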
[ "composer-cli blueprints save BLUEPRINT-NAME", "packages = []", "composer-cli blueprints push BLUEPRINT-NAME.toml", "composer-cli blueprints show BLUEPRINT-NAME", "composer-cli blueprints depsolve BLUEPRINT-NAME" ]
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/7/html/image_builder_guide/sect-Documentation-Image_Builder-Test_Chapter3-Test_Section_3
Chapter 30. Kernel
Chapter 30. Kernel libcgroup no longer truncates the values of cgroup subsystem parameters that are longer than 100 characters Previously, the internal representation of a value of any cgroup subsystem parameter was limited to a maximum length of 100 characters. Consequently, the libcgroup library truncated values longer than 100 characters before writing them to the file representing the matching cgroup subsystem parameter in the kernel. With this update, the maximum length of values of cgroup subsystem parameters in libcgroup has been extended to 4096 characters. As a result, libcgroup now handles values of cgroup subsystem parameters of any length correctly. (BZ#1549175) The mlx5 device no longer contains a firmware issue Previously, the mlx5 device contained a firmware issue, which caused the link of mlx5 devices to drop in certain situations after rebooting a system. As a consequence, a message similar to the following was seen in the output of the dmesg command: The issue is fixed in the latest firmware of this device. Contact your hardware vendor for information on how to obtain and install the latest firmware for your mlx5 device. (BZ#1636930)
[ "mlx5_core 0000:af:00.0: Port module event[error]: module 0, Cable error, Bus stuck(I2C or data shorted)" ]
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/7/html/7.6_release_notes/bug_fixes_kernel
Chapter 8. alarming
Chapter 8. alarming This chapter describes the commands under the alarming command. 8.1. alarming capabilities list List capabilities of alarming service Usage: Table 8.1. Command arguments Value Summary -h, --help Show this help message and exit Table 8.2. Output formatter options Value Summary -f {json,shell,table,value,yaml}, --format {json,shell,table,value,yaml} The output format, defaults to table -c COLUMN, --column COLUMN Specify the column(s) to include, can be repeated Table 8.3. JSON formatter options Value Summary --noindent Whether to disable indenting the json Table 8.4. Shell formatter options Value Summary --prefix PREFIX Add a prefix to all variable names Table 8.5. Table formatter options Value Summary --max-width <integer> Maximum display width, <1 to disable. you can also use the CLIFF_MAX_TERM_WIDTH environment variable, but the parameter takes precedence. --fit-width Fit the table to the display width. implied if --max- width greater than 0. Set the environment variable CLIFF_FIT_WIDTH=1 to always enable --print-empty Print empty table if there is no data to show.
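For example, to print the capabilities in JSON format instead of the default table, you could run:
openstack alarming capabilities list -f json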
[ "openstack alarming capabilities list [-h] [-f {json,shell,table,value,yaml}] [-c COLUMN] [--noindent] [--prefix PREFIX] [--max-width <integer>] [--fit-width] [--print-empty]" ]
https://docs.redhat.com/en/documentation/red_hat_openstack_platform/16.2/html/command_line_interface_reference/alarming
5.4. Moving swap File Systems from a Single Path Device to a Multipath Device
5.4. Moving swap File Systems from a Single Path Device to a Multipath Device By default, swap devices are set up as logical volumes. This does not require any special procedure for configuring them as multipath devices as long as you set up multipathing on the physical volumes that constitute the logical volume group. If your swap device is not an LVM volume, however, and it is mounted by device name, you may need to edit the /etc/fstab file to switch to the appropriate multipath device name. Determine the WWID number of the swap device by running the /sbin/multipath command with the -v3 option. The output from the command should show the swap device in the paths list. You should look in the command output for a line of the following format, showing the swap device: For example, if your swap file system is set up on sda or one of its partitions, you would see a line in the output such as the following: Set up an alias for the swap device in the /etc/multipath.conf file: Edit the /etc/fstab file and replace the old device path to the swap device with the multipath device. For example, if you had the following entry in the /etc/fstab file: You would change the entry to the following:
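After you update the /etc/fstab file, you can verify that swap is active on the multipath device, for example by re-enabling swap and listing the active swap areas. This is an illustrative check that is not part of the original procedure:
# swapoff -a
# swapon -a
# swapon -s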
[ "WWID H:B:T:L devname MAJOR : MINOR", "===== paths list ===== 1ATA WDC WD800JD-75MSA3 WD-WMAM9F 1:0:0:0 sda 8:0", "multipaths { multipath { wwid WWID_of_swap_device alias swapdev } }", "/dev/sda2 swap swap defaults 0 0", "/dev/mapper/swapdev swap swap defaults 0 0" ]
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/7/html/dm_multipath/move_swap_to_multipath
Chapter 10. Disconnected environment
Chapter 10. Disconnected environment Disconnected environment is a network restricted environment where the Operator Lifecycle Manager (OLM) cannot access the default Operator Hub and image registries, which require internet connectivity. Red Hat supports deployment of OpenShift Data Foundation in disconnected environments where you have installed OpenShift Container Platform in restricted networks. To install OpenShift Data Foundation in a disconnected environment, see Using Operator Lifecycle Manager on restricted networks of the Operators guide in OpenShift Container Platform documentation. Note When you install OpenShift Data Foundation in a restricted network environment, apply a custom Network Time Protocol (NTP) configuration to the nodes, because by default, internet connectivity is assumed in OpenShift Container Platform and chronyd is configured to use the *.rhel.pool.ntp.org servers. For more information, see the Red Hat Knowledgebase solution A newly deployed OCS 4 cluster status shows as "Degraded", Why? and Configuring chrony time service of the Installing guide in OpenShift Container Platform documentation. Red Hat OpenShift Data Foundation version 4.12 introduced the Agent-based Installer for disconnected environment deployment. The Agent-based Installer allows you to use a mirror registry for disconnected installations. For more information, see Preparing to install with Agent-based Installer . Packages to include for OpenShift Data Foundation When you prune the redhat-operator index image, include the following list of packages for the OpenShift Data Foundation deployment: ocs-operator odf-operator mcg-operator odf-csi-addons-operator odr-cluster-operator odr-hub-operator Optional: local-storage-operator Only for local storage deployments. Optional: odf-multicluster-orchestrator Only for Regional Disaster Recovery (Regional-DR) configuration. Important Name the CatalogSource as redhat-operators .
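For example, a pruned index image that contains the packages listed above could be built with the opm utility similar to the following. The index version tag and the target registry are placeholders that you must adjust for your environment, and you can append the optional packages if you need them:
opm index prune -f registry.redhat.io/redhat/redhat-operator-index:v4.13 -p ocs-operator,odf-operator,mcg-operator,odf-csi-addons-operator,odr-cluster-operator,odr-hub-operator -t mirror.example.com:5000/redhat/redhat-operator-index:v4.13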
null
https://docs.redhat.com/en/documentation/red_hat_openshift_data_foundation/4.13/html/planning_your_deployment/disconnected-environment_rhodf
Chapter 5. Configuring Satellite Environment for Performance
Chapter 5. Configuring Satellite Environment for Performance CPU The more physical cores that are available to Satellite, the higher the throughput that can be achieved for tasks. Some Satellite components, such as Puppet and PostgreSQL, are CPU-intensive applications and benefit significantly from a higher number of available CPU cores. Memory The more memory that is available in the system running Satellite, the better the response times for Satellite operations. Because Satellite uses PostgreSQL as its database solution, any additional memory, coupled with the recommended tunings, improves application response times because more data can be retained in memory. Disk Because Satellite performs heavy IOPS due to repository synchronizations, package data retrieval, and high-frequency database updates for the subscription records of content hosts, install Satellite on high-speed SSD storage to avoid performance bottlenecks caused by increased disk reads or writes. Satellite requires disk IO to be at or above 60 - 80 megabytes per second of average throughput for read operations. Anything below this value can have severe implications for the operation of Satellite. Satellite components such as PostgreSQL benefit from using SSDs due to their lower latency compared to HDDs. Network The communication between the Satellite Server and Capsules is impacted by the network performance. A reliable network with minimal jitter and low latency is required for operations such as synchronization between the Satellite Server and Capsules; at a minimum, ensure that the network is not causing connection resets. Server Power Management By default, your server is likely configured to conserve power. While this is a good approach to keep the maximum power consumption in check, it also has the side effect of lowering the performance that Satellite can achieve. For a server running Satellite, it is recommended to configure the BIOS to run the system in performance mode to achieve the maximum performance levels that Satellite can reach. 5.1. Benchmarking Disk Performance We are working to update satellite-maintain to only warn the users when its internal quick storage benchmark results in numbers below our recommended throughput. We are also working on an updated benchmark script that you can run (which will likely be integrated into satellite-maintain in the future) to get more accurate real-world storage information. Note You may have to temporarily reduce the RAM in order to run the I/O benchmark. For example, if your Satellite Server has 256 GiB RAM, tests would require 512 GiB of storage to run. As a workaround, you can add the mem=20G kernel option in grub during system boot to temporarily reduce the size of the RAM. The benchmark creates a file twice the size of the RAM in the specified directory and executes a series of storage I/O tests against it. The size of the file ensures that the test is not just testing the filesystem caching. If you benchmark other filesystems, for example smaller volumes such as PostgreSQL storage, you might have to reduce the RAM size as described above. If you are using different storage solutions such as SAN or iSCSI, you can expect different performance. Red Hat recommends that you stop all services before executing this script; you will be prompted to do so. This test does not use direct I/O and will utilize file caching as normal operations would. You can find our first version of the script storage-benchmark .
To execute it, download the script to your Satellite, make it executable, and run: As noted in the README block in the script, you generally want to see an average of 100MB/sec or higher in the tests below: Local SSD-based storage should give values of 600MB/sec or higher. Spinning disks should give values in the range of 100 - 200MB/sec or higher. If you see values below this, please open a support ticket for assistance. For more information, see Impact of Disk Speed on Satellite Operations . 5.2. Enabling Tuned Profiles Red Hat Enterprise Linux 7 enables the tuned daemon by default during installation. On bare-metal, Red Hat recommends running the throughput-performance tuned profile on Satellite Server and Capsules. On virtual machines, Red Hat recommends running the virtual-guest profile. Procedure Check if tuned is running: If tuned is not running, enable it: Optional: View a list of available tuned profiles: Enable a tuned profile depending on your scenario: Transparent Huge Pages is a memory management technique used by the Linux kernel which reduces the overhead of using the Translation Lookaside Buffer (TLB) by using larger memory pages. Because databases have sparse memory access patterns instead of contiguous memory access patterns, database workloads often perform poorly when Transparent Huge Pages is enabled. To improve the performance of PostgreSQL, disable Transparent Huge Pages. In deployments where the PostgreSQL database is running on a separate server, there may be a small benefit to using Transparent Huge Pages on the Satellite Server only. For more information about disabling Transparent Huge Pages, see How to disable transparent hugepages (THP) on Red Hat Enterprise Linux 7, 8 .
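For example, you can check the current Transparent Huge Pages setting before and after making changes; the value shown in brackets is the active setting:
# cat /sys/kernel/mm/transparent_hugepage/enabled
[always] madvise never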
[ "./storage-benchmark /var/lib/pulp", "systemctl status tuned", "systemctl enable --now tuned", "tuned-adm list", "tuned-adm profile \" My_Tuned_Profile \"" ]
https://docs.redhat.com/en/documentation/red_hat_satellite/6.11/html/performance_tuning_guide/configuring_project_environment_for_performance_performance-tuning
7.3. Booleans
7.3. Booleans SELinux is based on the least level of access required for a service to run. Services can be run in a variety of ways; therefore, you need to specify how you run your services. Use the following Booleans to set up SELinux: allow_cvs_read_shadow This Boolean allows the cvs daemon to access the /etc/shadow file for user authentication. Note Due to the continuous development of the SELinux policy, the list above might not contain all Booleans related to the service at all times. To list them, run the following command as root:
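For example, to allow the cvs daemon to read the /etc/shadow file, you can enable the Boolean persistently as root. Only do this if your deployment actually requires it:
~]# setsebool -P allow_cvs_read_shadow on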
[ "~]# semanage boolean -l | grep service_name" ]
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/managing_confined_services/sect-managing_confined_services-concurrent_versioning_system-booleans
Chapter 18. Annotating encrypted RBD storage classes
Chapter 18. Annotating encrypted RBD storage classes Starting with OpenShift Data Foundation 4.14, when the OpenShift console creates a RADOS block device (RBD) storage class with encryption enabled, the annotation is set automatically. However, you need to add the annotation, cdi.kubevirt.io/clone-strategy=copy for any of the encrypted RBD storage classes that were previously created before updating to the OpenShift Data Foundation version 4.14. This enables customer data integration (CDI) to use host-assisted cloning instead of the default smart cloning. The keys used to access an encrypted volume are tied to the namespace where the volume was created. When cloning an encrypted volume to a new namespace, such as, provisioning a new OpenShift Virtualization virtual machine, a new volume must be created and the content of the source volume must then be copied into the new volume. This behavior is triggered automatically if the storage class is properly annotated.
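For example, for a previously created encrypted RBD storage class named ocs-storagecluster-ceph-rbd-encrypted (a placeholder name), you could add the annotation with a command similar to the following:
$ oc annotate storageclass ocs-storagecluster-ceph-rbd-encrypted cdi.kubevirt.io/clone-strategy=copy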
null
https://docs.redhat.com/en/documentation/red_hat_openshift_data_foundation/4.16/html/troubleshooting_openshift_data_foundation/annotating-the-existing-encrypted-rbd-storageclasses_rhodf
Chapter 4. Using PostgreSQL
Chapter 4. Using PostgreSQL The PostgreSQL server is an open source robust and highly-extensible database server based on the SQL language. The PostgreSQL server provides an object-relational database system that can manage extensive datasets and a high number of concurrent users. For these reasons, PostgreSQL servers can be used in clusters to manage high amounts of data. The PostgreSQL server includes features for ensuring data integrity, building fault-tolerant environments and applications. With the PostgreSQL server, you can extend a database with your own data types, custom functions, or code from different programming languages without the need to recompile the database. Learn how to install and configure PostgreSQL on a RHEL system, how to back up PostgreSQL data, and how to migrate from an earlier PostgreSQL version. 4.1. Installing PostgreSQL RHEL 9 provides PostgreSQL 13 as the initial version of this Application Stream, which you can install easily as an RPM package. Additional PostgreSQL versions are provided as modules with a shorter life cycle in minor releases of RHEL 9: RHEL 9.2 introduced PostgreSQL 15 as the postgresql:15 module stream RHEL 9.4 introduced PostgreSQL 16 as the postgresql:16 module stream To install PostgreSQL , use the following procedure. Note By design, it is impossible to install more than one version (stream) of the same module in parallel. Therefore, you must choose only one of the available streams from the postgresql module. You can use different versions of the PostgreSQL database server in containers, see Running multiple PostgreSQL versions in containers . Procedure Install the PostgreSQL server packages: For PostgreSQL 13 from the RPM package: For PostgreSQL 15 or PostgreSQL 16 by selecting stream (version) 15 or 16 from the postgresql module and specifying the server profile, for example: The postgres superuser is created automatically. Initialize the database cluster: Red Hat recommends storing the data in the default /var/lib/pgsql/data directory. Start the postgresql service: Enable the postgresql service to start at boot: Important If you want to upgrade from an earlier postgresql stream within RHEL 9, follow both procedures described in Switching to a later stream and in Migrating to a RHEL 9 version of PostgreSQL . 4.2. Running multiple PostgreSQL versions in containers To run different versions of PostgreSQL on the same host, run them in containers because you cannot install multiple versions (streams) of the same module in parallel. This procedure includes PostgreSQL 13 and PostgreSQL 15 as examples but you can use any PostgreSQL container version available in the Red Hat Ecosystem Catalog. Prerequisites The container-tools meta-package is installed. Procedure Use your Red Hat Customer Portal account to authenticate to the registry.redhat.io registry: Skip this step if you are already logged in to the container registry. Run PostgreSQL 13 in a container: For more information about the usage of this container image, see the Red Hat Ecosystem Catalog . Run PostgreSQL 15 in a container: For more information about the usage of this container image, see the Red Hat Ecosystem Catalog . Run PostgreSQL 16 in a container: For more information about the usage of this container image, see the Red Hat Ecosystem Catalog . Note The container names and host ports of the two database servers must differ. 
To ensure that clients can access the database server on the network, open the host ports in the firewall: Verification Display information about running containers: Connect to the database server and log in as root: Additional resources Building, running, and managing containers Browse containers in the Red Hat Ecosystem Catalog 4.3. Creating PostgreSQL users PostgreSQL users are of the following types: The postgres UNIX system user - should be used only to run the PostgreSQL server and client applications, such as pg_dump . Do not use the postgres system user for any interactive work on PostgreSQL administration, such as database creation and user management. A database superuser - the default postgres PostgreSQL superuser is not related to the postgres system user. You can limit access of the postgres superuser in the pg_hba.conf file, otherwise no other permission limitations exist. You can also create other database superusers. A role with specific database access permissions: A database user - has a permission to log in by default A group of users - enables managing permissions for the group as a whole Roles can own database objects (for example, tables and functions) and can assign object privileges to other roles using SQL commands. Standard database management privileges include SELECT , INSERT , UPDATE , DELETE , TRUNCATE , REFERENCES , TRIGGER , CREATE , CONNECT , TEMPORARY , EXECUTE , and USAGE . Role attributes are special privileges, such as LOGIN , SUPERUSER , CREATEDB , and CREATEROLE . Important Red Hat recommends performing most tasks as a role that is not a superuser. A common practice is to create a role that has the CREATEDB and CREATEROLE privileges and use this role for all routine management of databases and roles. Prerequisites The PostgreSQL server is installed. The database cluster is initialized. Procedure To create a user, set a password for the user, and assign the user the CREATEROLE and CREATEDB permissions: Replace mydbuser with the username and mypasswd with the user's password. Additional resources PostgreSQL database roles PostgreSQL privileges Configuring PostgreSQL Example 4.1. Initializing, creating, and connecting to a PostgreSQL database This example demonstrates how to initialize a PostgreSQL database, create a database user with routine database management privileges, and how to create a database that is accessible from any system account through the database user with management privileges. Install the PosgreSQL server: Initialize the database cluster: Set the password hashing algorithm to scram-sha-256 . In the /var/lib/pgsql/data/postgresql.conf file, change the following line: to: In the /var/lib/pgsql/data/pg_hba.conf file, change the following line for the IPv4 local connections: to: Start the postgresql service: Log in as the system user named postgres : Start the PostgreSQL interactive terminal: Optional: Obtain information about the current database connection: Create a user named mydbuser , set a password for mydbuser , and assign mydbuser the CREATEROLE and CREATEDB permissions: The mydbuser user now can perform routine database management operations: create databases and manage user indexes. 
Log out of the interactive terminal by using the \q meta command: Log out of the postgres user session: Log in to the PostgreSQL terminal as mydbuser , specify the hostname, and connect to the default postgres database, which was created during initialization: Create a database named mydatabase : Log out of the session: Connect to mydatabase as mydbuser : Optional: Obtain information about the current database connection: 4.4. Configuring PostgreSQL In a PostgreSQL database, all data and configuration files are stored in a single directory called a database cluster. Red Hat recommends storing all data, including configuration files, in the default /var/lib/pgsql/data/ directory. PostgreSQL configuration consists of the following files: postgresql.conf - is used for setting the database cluster parameters. postgresql.auto.conf - holds basic PostgreSQL settings similarly to postgresql.conf . However, this file is under the server control. It is edited by the ALTER SYSTEM queries, and cannot be edited manually. pg_ident.conf - is used for mapping user identities from external authentication mechanisms into the PostgreSQL user identities. pg_hba.conf - is used for configuring client authentication for PostgreSQL databases. To change the PostgreSQL configuration, use the following procedure. Procedure Edit the respective configuration file, for example, /var/lib/pgsql/data/postgresql.conf . Restart the postgresql service so that the changes become effective: Example 4.2. Configuring PostgreSQL database cluster parameters This example shows basic settings of the database cluster parameters in the /var/lib/pgsql/data/postgresql.conf file. Example 4.3. Setting client authentication in PostgreSQL This example demonstrates how to set client authentication in the /var/lib/pgsql/data/pg_hba.conf file. 4.5. Configuring TLS encryption on a PostgreSQL server By default, PostgreSQL uses unencrypted connections. For more secure connections, you can enable Transport Layer Security (TLS) support on the PostgreSQL server and configure your clients to establish encrypted connections. Prerequisites The PostgreSQL server is installed. The database cluster is initialized. If the server runs RHEL 9.2 or later and the FIPS mode is enabled, clients must either support the Extended Master Secret (EMS) extension or use TLS 1.3. TLS 1.2 connections without EMS fail. For more information, see the Red Hat Knowledgebase solution TLS extension "Extended Master Secret" enforced on RHEL 9.2 and later . Procedure Install the OpenSSL library: Generate a TLS certificate and a key: Replace dbhost.yourdomain.com with your database host and domain name. 
Copy your signed certificate and your private key to the required locations on the database server: Change the owner and group ownership of the signed certificate and your private key to the postgres user: Restrict the permissions for your private key so that it is readable only by the owner: Set the password hashing algorithm to scram-sha-256 by changing the following line in the /var/lib/pgsql/data/postgresql.conf file: to: Configure PostgreSQL to use SSL/TLS by changing the following line in the /var/lib/pgsql/data/postgresql.conf file: to: Restrict access to all databases to accept only connections from clients using TLS by changing the following line for the IPv4 local connections in the /var/lib/pgsql/data/pg_hba.conf file: to: Alternatively, you can restrict access for a single database and a user by adding the following new line: Replace mydatabase with the database name and mydbuser with the username. Make the changes effective by restarting the postgresql service: Verification To manually verify that the connection is encrypted: Connect to the PostgreSQL database as the mydbuser user, specify the hostname and the database name: Replace mydatabase with the database name and mydbuser with the username. Obtain information about the current database connection: You can write a simple application that verifies whether a connection to PostgreSQL is encrypted. This example demonstrates such an application written in C that uses the libpq client library, which is provided by the libpq-devel package: Replace mypassword with the password, mydatabase with the database name, and mydbuser with the username. Note You must load the pq libraries for compilation by using the -lpq option. For example, to compile the application by using the GCC compiler: where the source_file.c contains the example code above, and myapplication is the name of your application for verifying secured PostgreSQL connection. Example 4.4. Initializing, creating, and connecting to a PostgreSQL database using TLS encryption This example demonstrates how to initialize a PostgreSQL database, create a database user and a database, and how to connect to the database using a secured connection. Install the PosgreSQL server: Initialize the database cluster: Install the OpenSSL library: Generate a TLS certificate and a key: Replace dbhost.yourdomain.com with your database host and domain name. Copy your signed certificate and your private key to the required locations on the database server: Change the owner and group ownership of the signed certificate and your private key to the postgres user: Restrict the permissions for your private key so that it is readable only by the owner: Set the password hashing algorithm to scram-sha-256 . In the /var/lib/pgsql/data/postgresql.conf file, change the following line: to: Configure PostgreSQL to use SSL/TLS. 
In the /var/lib/pgsql/data/postgresql.conf file, change the following line: to: Start the postgresql service: Log in as the system user named postgres : Start the PostgreSQL interactive terminal as the postgres user: Create a user named mydbuser and set a password for mydbuser : Create a database named mydatabase : Grant all permissions to the mydbuser user: Log out of the interactive terminal: Log out of the postgres user session: Restrict access to all databases to accept only connections from clients using TLS by changing the following line for the IPv4 local connections in the /var/lib/pgsql/data/pg_hba.conf file: to: Make the changes effective by restarting the postgresql service: Connect to the PostgreSQL database as the mydbuser user, specify the hostname and the database name: 4.6. Backing up PostgreSQL data To back up PostgreSQL data, use one of the following approaches: SQL dump File system level backup Continuous archiving 4.6.1. Backing up PostgreSQL data with an SQL dump The SQL dump method is based on generating a dump file with SQL commands. When a dump is uploaded back to the database server, it recreates the database in the same state as it was at the time of the dump. The SQL dump is ensured by the following PostgreSQL client applications: pg_dump dumps a single database without cluster-wide information about roles or tablespaces pg_dumpall dumps each database in a given cluster and preserves cluster-wide data, such as role and tablespace definitions. By default, the pg_dump and pg_dumpall commands write their results into the standard output. To store the dump in a file, redirect the output to an SQL file. The resulting SQL file can be either in a text format or in other formats that allow for parallelism and for more detailed control of object restoration. You can perform the SQL dump from any remote host that has access to the database. 4.6.1.1. Advantages and disadvantages of an SQL dump An SQL dump has the following advantages compared to other PostgreSQL backup methods: An SQL dump is the only PostgreSQL backup method that is not server version-specific. The output of the pg_dump utility can be reloaded into later versions of PostgreSQL , which is not possible for file system level backups or continuous archiving. An SQL dump is the only method that works when transferring a database to a different machine architecture, such as going from a 32-bit to a 64-bit server. An SQL dump provides internally consistent dumps. A dump represents a snapshot of the database at the time pg_dump began running. The pg_dump utility does not block other operations on the database when it is running. A disadvantage of an SQL dump is that it takes more time compared to file system level backup. 4.6.1.2. Performing an SQL dump using pg_dump To dump a single database without cluster-wide information, use the pg_dump utility. Prerequisites You must have read access to all tables that you want to dump. To dump the entire database, you must run the commands as the postgres superuser or a user with database administrator privileges. Procedure Dump a database without cluster-wide information: To specify which database server pg_dump will contact, use the following command-line options: The -h option to define the host. The default host is either the local host or what is specified by the PGHOST environment variable. The -p option to define the port. The default port is indicated by the PGPORT environment variable or the compiled-in default. 4.6.1.3. 
Performing an SQL dump using pg_dumpall To dump each database in a given database cluster and to preserve cluster-wide data, use the pg_dumpall utility. Prerequisites You must run the commands as the postgres superuser or a user with database administrator privileges. Procedure Dump all databases in the database cluster and preserve cluster-wide data: To specify which database server pg_dumpall will contact, use the following command-line options: The -h option to define the host. The default host is either the local host or what is specified by the PGHOST environment variable. The -p option to define the port. The default port is indicated by the PGPORT environment variable or the compiled-in default. The -l option to define the default database. This option enables you to choose a default database different from the postgres database created automatically during initialization. 4.6.1.4. Restoring a database dumped using pg_dump To restore a database from an SQL dump that you dumped using the pg_dump utility, follow the steps below. Prerequisites You must run the commands as the postgres superuser or a user with database administrator privileges. Procedure Create a new database: Verify that all users who own objects or were granted permissions on objects in the dumped database already exist. If such users do not exist, the restore fails to recreate the objects with the original ownership and permissions. Run the psql utility to restore a text file dump created by the pg_dump utility: where dumpfile is the output of the pg_dump command. To restore a non-text file dump, use the pg_restore utility instead: 4.6.1.5. Restoring databases dumped using pg_dumpall To restore data from a database cluster that you dumped using the pg_dumpall utility, follow the steps below. Prerequisites You must run the commands as the postgres superuser or a user with database administrator privileges. Procedure Ensure that all users who own objects or were granted permissions on objects in the dumped databases already exist. If such users do not exist, the restore fails to recreate the objects with the original ownership and permissions. Run the psql utility to restore a text file dump created by the pg_dumpall utility: where dumpfile is the output of the pg_dumpall command. 4.6.1.6. Performing an SQL dump of a database on another server Dumping a database directly from one server to another is possible because pg_dump and psql can write to and read from pipes. Procedure To dump a database from one server to another, run: 4.6.1.7. Handling SQL errors during restore By default, psql continues to execute if an SQL error occurs, causing the database to restore only partially. To change the default behavior, use one of the following approaches when restoring a dump. Prerequisites You must run the commands as the postgres superuser or a user with database administrator privileges. Procedure Make psql exit with an exit status of 3 if an SQL error occurs by setting the ON_ERROR_STOP variable: Specify that the whole dump is restored as a single transaction so that the restore is either fully completed or canceled. When restoring a text file dump using the psql utility: When restoring a non-text file dump using the pg_restore utility: Note that when using this approach, even a minor error can cancel a restore operation that has already run for many hours. Additional resources PostgreSQL Documentation - SQL dump 4.6.2. 
Backing up PostgreSQL data with a file system level backup To create a file system level backup, copy PostgreSQL database files to another location. For example, you can use any of the following approaches: Create an archive file using the tar utility. Copy the files to a different location using the rsync utility. Create a consistent snapshot of the data directory. 4.6.2.1. Advantages and limitations of file system backing up File system level backing up has the following advantage compared to other PostgreSQL backup methods: File system level backing up is usually faster than an SQL dump. File system level backing up has the following limitations compared to other PostgreSQL backup methods: This backing up method is not suitable when you want to upgrade from RHEL 8 to RHEL 9 and migrate your data to the upgraded system. File system level backup is specific to an architecture and a RHEL major version. You can restore your data on your RHEL 8 system if the upgrade is not successful but you cannot restore the data on a RHEL 9 system. The database server must be shut down before backing up and restoring data. Backing up and restoring certain individual files or tables is impossible. Backing up a file system works only for complete backing up and restoring of an entire database cluster. 4.6.2.2. Performing file system level backing up To perform file system level backing up, use the following procedure. Procedure Choose the location of a database cluster and initialize this cluster: Stop the postgresql service: Use any method to create a file system backup, for example a tar archive: Start the postgresql service: Additional resources PostgreSQL Documentation - file system level backup 4.6.3. Backing up PostgreSQL data by continuous archiving PostgreSQL records every change made to the database's data files into a write ahead log (WAL) file that is available in the pg_wal/ subdirectory of the cluster's data directory. This log is intended primarily for a crash recovery. After a crash, the log entries made since the last checkpoint can be used for restoring the database to a consistency. The continuous archiving method, also known as an online backup, combines the WAL files with a copy of the database cluster in the form of a base backup performed on a running server or a file system level backup. If a database recovery is needed, you can restore the database from the copy of the database cluster and then replay log from the backed up WAL files to bring the system to the current state. With the continuous archiving method, you must keep a continuous sequence of all archived WAL files that extends at minimum back to the start time of your last base backup. Therefore the ideal frequency of base backups depends on: The storage volume available for archived WAL files. The maximum possible duration of data recovery in situations when recovery is necessary. In cases with a long period since the last backup, the system replays more WAL segments, and the recovery therefore takes more time. Note You cannot use pg_dump and pg_dumpall SQL dumps as a part of a continuous archiving backup solution. SQL dumps produce logical backups and do not contain enough information to be used by a WAL replay. 4.6.3.1. 
Advantages and disadvantages of continuous archiving Continuous archiving has the following advantages compared to other PostgreSQL backup methods: With the continuous backup method, it is possible to use a base backup that is not entirely consistent because any internal inconsistency in the backup is corrected by the log replay. Therefore you can perform a base backup on a running PostgreSQL server. A file system snapshot is not needed; tar or a similar archiving utility is sufficient. Continuous backup can be achieved by continuing to archive the WAL files because the sequence of WAL files for the log replay can be indefinitely long. This is particularly valuable for large databases. Continuous backup supports point-in-time recovery. It is not necessary to replay the WAL entries to the end. The replay can be stopped at any point and the database can be restored to its state at any time since the base backup was taken. If the series of WAL files are continuously available to another machine that has been loaded with the same base backup file, it is possible to restore the other machine with a nearly-current copy of the database at any point. Continuous archiving has the following disadvantages compared to other PostgreSQL backup methods: Continuous backup method supports only restoration of an entire database cluster, not a subset. Continuous backup requires extensive archival storage. 4.6.3.2. Setting up WAL archiving A running PostgreSQL server produces a sequence of write ahead log (WAL) records. The server physically divides this sequence into WAL segment files, which are given numeric names that reflect their position in the WAL sequence. Without WAL archiving, the segment files are reused and renamed to higher segment numbers. When archiving WAL data, the contents of each segment file are captured and saved at a new location before the segment file is reused. You have multiple options where to save the content, such as an NFS-mounted directory on another machine, a tape drive, or a CD. Note that WAL records do not include changes to configuration files. To enable WAL archiving, use the following procedure. Procedure In the /var/lib/pgsql/data/postgresql.conf file: Set the wal_level configuration parameter to replica or higher. Set the archive_mode parameter to on . Specify the shell command in the archive_command configuration parameter. You can use the cp command, another command, or a shell script. Note The archive command is executed only on completed WAL segments. A server that generates little WAL traffic can have a substantial delay between the completion of a transaction and its safe recording in archive storage. To limit how old unarchived data can be, you can: Set the archive_timeout parameter to force the server to switch to a new WAL segment file with a given frequency. Use the pg_switch_wal parameter to force a segment switch to ensure that a transaction is archived immediately after it finishes. Example 4.5. Shell command for archiving WAL segments This example shows a simple shell command you can set in the archive_command configuration parameter. The following command copies a completed segment file to the required location: where the %p parameter is replaced by the relative path to the file to archive and the %f parameter is replaced by the file name. This command copies archivable WAL segments to the /mnt/server/archivedir/ directory. 
After replacing the %p and %f parameters, the executed command looks as follows: A similar command is generated for each new file that is archived. Restart the postgresql service to enable the changes: Test your archive command and ensure it does not overwrite an existing file and that it returns a nonzero exit status if it fails. To protect your data, ensure that the segment files are archived into a directory that does not have group or world read access. Additional resources PostgreSQL 16 Documentation 4.6.3.3. Making a base backup You can create a base backup in several ways. The simplest way of performing a base backup is using the pg_basebackup utility on a running PostgreSQL server. The base backup process creates a backup history file that is stored into the WAL archive area and is named after the first WAL segment file that you need for the base backup. The backup history file is a small text file containing the starting and ending times, and WAL segments of the backup. If you used the label string to identify the associated dump file, you can use the backup history file to determine which dump file to restore. Note Consider keeping several backup sets to be certain that you can recover your data. Prerequisites You must run the commands as the postgres superuser, a user with database administrator privileges, or another user with at least REPLICATION permissions. You must keep all the WAL segment files generated during and after the base backup. Procedure Use the pg_basebackup utility to perform the base backup. To create a base backup as individual files (plain format): Replace backup_directory with your chosen backup location. If you use tablespaces and perform the base backup on the same host as the server, you must also use the --tablespace-mapping option, otherwise the backup will fail upon an attempt to write the backup to the same location. To create a base backup as a tar archive ( tar and compressed format): Replace backup_directory with your chosen backup location. To restore such data, you must manually extract the files in the correct locations. To specify which database server pg_basebackup will contact, use the following command-line options: The -h option to define the host. The default host is either the local host or a host specified by the PGHOST environment variable. The -p option to define the port. The default port is indicated by the PGPORT environment variable or the compiled-in default. After the base backup process is complete, safely archive the copy of the database cluster and the WAL segment files used during the backup, which are specified in the backup history file. Delete WAL segments numerically lower than the WAL segment files used in the base backup because these are older than the base backup and no longer needed for a restore. Additional resources PostgreSQL Documentation - base backup PostgreSQL Documentation - pg_basebackup utility 4.6.3.4. Restoring the database using a continuous archive backup To restore a database using a continuous backup, use the following procedure. Procedure Stop the server: Copy the necessary data to a temporary location. Preferably, copy the whole cluster data directory and any tablespaces. Note that this requires enough free space on your system to hold two copies of your existing database. If you do not have enough space, save the contents of the cluster's pg_wal directory, which can contain logs that were not archived before the system went down. 
Remove all existing files and subdirectories under the cluster data directory and under the root directories of any tablespaces you are using. Restore the database files from your base backup. Ensure that: The files are restored with the correct ownership (the database system user, not root ). The files are restored with the correct permissions. The symbolic links in the pg_tblspc/ subdirectory are restored correctly. Remove any files present in the pg_wal/ subdirectory. These files resulted from the base backup and are therefore obsolete. If you did not archive pg_wal/ , recreate it with proper permissions. Copy any unarchived WAL segment files that you saved in step 2 into pg_wal/ . Create the recovery.conf recovery command file in the cluster data directory and specify the shell command in the restore_command configuration parameter. You can use the cp command, another command, or a shell script. For example: Start the server: The server will enter the recovery mode and proceed to read through the archived WAL files that it needs. If the recovery is terminated due to an external error, the server can be restarted and it will continue the recovery. When the recovery process is completed, the server renames recovery.conf to recovery.done . This prevents the server from accidental re-entering the recovery mode after it starts normal database operations. Check the contents of the database to verify that the database has recovered into the required state. If the database has not recovered into the required state, return to step 1. If the database has recovered into the required state, allow the users to connect by restoring the client authentication configuration in the pg_hba.conf file. 4.6.3.4.1. Additional resources Continuous archiving method 4.7. Migrating to a RHEL 9 version of PostgreSQL Red Hat Enterprise Linux 8 provides PostgreSQL in multiple module streams: PostgreSQL 10 (the default postgresql stream), PostgreSQL 9.6 , PostgreSQL 12 , PostgreSQL 13 , PostgreSQL 15 , and PostgreSQL 16 . In RHEL 9, PostgreSQL 13 , PostgreSQL 15 , and PostgreSQL 16 are available. On RHEL, you can use two PostgreSQL migration paths for the database files: Fast upgrade using the pg_upgrade utility Dump and restore upgrade The fast upgrade method is quicker than the dump and restore process. However, in certain cases, the fast upgrade does not work, and you can only use the dump and restore process, for example in case of cross-architecture upgrades. As a prerequisite for migration to a later version of PostgreSQL , back up all your PostgreSQL databases. Dumping the databases and performing backup of the SQL files is required for the dump and restore process and recommended for the fast upgrade method. Before migrating to a later version of PostgreSQL , see the upstream compatibility notes for the version of PostgreSQL to which you want to migrate, and for all skipped PostgreSQL versions between the one you are migrating from and the target version. 4.7.1. Notable differences between PostgreSQL 15 and PostgreSQL 16 PostgreSQL 16 introduced the following notable changes. The postmasters binary is no longer available PostgreSQL is no longer distributed with the postmaster binary. Users who start the postgresql server by using the provided systemd unit file (the systemctl start postgres.service command) are not affected by this change. If you previously started the postgresql server directly through the postmaster binary, you must now use the postgres binary instead. 
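As a hedged illustration of this change (the /var/lib/pgsql/data path is the default data directory and might differ on your system), a direct foreground start now calls the postgres binary; starting the server through the provided systemd unit file remains the recommended approach:

# PostgreSQL 15 and earlier also shipped: postmaster -D /var/lib/pgsql/data
# With PostgreSQL 16, run the postgres binary directly as the postgres user:
su - postgres -c 'postgres -D /var/lib/pgsql/data'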
Documentation is no longer packaged PostgreSQL no longer provides documentation in PDF format within the package. Use the online documentation instead. 4.7.2. Notable differences between PostgreSQL 13 and PostgreSQL 15 PostgreSQL 15 introduced the following backwards incompatible changes. Default permissions of the public schema The default permissions of the public schema have been modified in PostgreSQL 15 . Newly created users need to grant permission explicitly by using the GRANT ALL ON SCHEMA public TO myuser; command. The following example works in PostgreSQL 13 and earlier: The following example works in PostgreSQL 15 and later: Note Ensure that the mydbuser access is configured appropriately in the pg_hba.conf file. See Creating PostgreSQL users for more information. PQsendQuery() no longer supported in pipeline mode Since PostgreSQL 15 , the libpq PQsendQuery() function is no longer supported in pipeline mode. Modify affected applications to use the PQsendQueryParams() function instead. 4.7.3. Fast upgrade using the pg_upgrade utility As a system administrator, you can upgrade to the most recent version of PostgreSQL by using the fast upgrade method. To perform a fast upgrade, copy binary data files to the /var/lib/pgsql/data/ directory and use the pg_upgrade utility. You can use this method for migrating data: From the RHEL 8 version of PostgreSQL 12 to a RHEL version of PostgreSQL 13 From a RHEL 8 or 9 version of PostgreSQL 13 to a RHEL version of PostgreSQL 15 From a RHEL 8 or 9 version of PostgreSQL 15 to a RHEL version of PostgreSQL 16 The following procedure describes migration from the RHEL 8 version of PostgreSQL 12 to the RHEL 9 version of PostgreSQL 13 using the fast upgrade method. For migration from postgresql streams other than 12 , use one of the following approaches: Update your PostgreSQL server to version 12 on RHEL 8 and then use the pg_upgrade utility to perform the fast upgrade to RHEL 9 version of PostgreSQL 13 . Use the dump and restore upgrade directly between any RHEL 8 version of PostgreSQL and an equal or later PostgreSQL version in RHEL 9. Prerequisites Before performing the upgrade, back up all your data stored in the PostgreSQL databases. By default, all data is stored in the /var/lib/pgsql/data/ directory on both the RHEL 8 and RHEL 9 systems. Procedure On the RHEL 9 system, install the postgresql-server and postgresql-upgrade packages: Optionally, if you used any PostgreSQL server modules on RHEL 8, install them also on the RHEL 9 system in two versions, compiled both against PostgreSQL 12 (installed as the postgresql-upgrade package) and the target version of PostgreSQL 13 (installed as the postgresql-server package). If you need to compile a third-party PostgreSQL server module, build it both against the postgresql-devel and postgresql-upgrade-devel packages. Check the following items: Basic configuration: On the RHEL 9 system, check whether your server uses the default /var/lib/pgsql/data directory and the database is correctly initialized and enabled. In addition, the data files must be stored in the same path as mentioned in the /usr/lib/systemd/system/postgresql.service file. PostgreSQL servers: Your system can run multiple PostgreSQL servers. Ensure that the data directories for all these servers are handled independently. PostgreSQL server modules: Ensure that the PostgreSQL server modules that you used on RHEL 8 are installed on your RHEL 9 system as well. Note that plugins are installed in the /usr/lib64/pgsql/ directory. 
Ensure that the postgresql service is not running on either of the source and target systems at the time of copying data. Copy the database files from the source location to the /var/lib/pgsql/data/ directory on the RHEL 9 system. Perform the upgrade process by running the following command as the PostgreSQL user: This launches the pg_upgrade process in the background. In case of failure, postgresql-setup provides an informative error message. Copy the prior configuration from /var/lib/pgsql/data-old to the new cluster. Note that the fast upgrade does not reuse the prior configuration in the newer data stack and the configuration is generated from scratch. If you want to combine the old and new configurations manually, use the *.conf files in the data directories. Start the new PostgreSQL server: Analyze the new database cluster. For PostgreSQL 13 : For PostgreSQL 15 or later: Note You may need to use ALTER COLLATION name REFRESH VERSION , see the upstream documentation for details. If you want the new PostgreSQL server to be automatically started on boot, run: 4.7.4. Dump and restore upgrade When using the dump and restore upgrade, you must dump all databases contents into an SQL file dump file. Note that the dump and restore upgrade is slower than the fast upgrade method and it may require some manual fixing in the generated SQL file. You can use this method for migrating data from any RHEL 8 version of PostgreSQL to any equal or later version of PostgreSQL in RHEL 9. On RHEL 8 and RHEL 9 systems, PostgreSQL data is stored in the /var/lib/pgsql/data/ directory by default. To perform the dump and restore upgrade, change the user to root . The following procedure describes migration from the RHEL 8 default version of PostgreSQL 10 to the RHEL 9 version of PostgreSQL 13 . Procedure On your RHEL 8 system, start the PostgreSQL 10 server: On the RHEL 8 system, dump all databases contents into the pgdump_file.sql file: Ensure that the databases were dumped correctly: As a result, the path to the dumped sql file is displayed: /var/lib/pgsql/pgdump_file.sql . On the RHEL 9 system, install the postgresql-server package: Optionally, if you used any PostgreSQL server modules on RHEL 8, install them also on the RHEL 9 system. If you need to compile a third-party PostgreSQL server module, build it against the postgresql-devel package. On the RHEL 9 system, initialize the data directory for the new PostgreSQL server: On the RHEL 9 system, copy the pgdump_file.sql into the PostgreSQL home directory, and check that the file was copied correctly: Copy the configuration files from the RHEL 8 system: The configuration files to be copied are: /var/lib/pgsql/data/pg_hba.conf /var/lib/pgsql/data/pg_ident.conf /var/lib/pgsql/data/postgresql.conf On the RHEL 9 system, start the new PostgreSQL server: On the RHEL 9 system, import data from the dumped sql file: 4.8. Installing and configuring a PostgreSQL database server by using RHEL system roles You can use the postgresql RHEL system role to automate the installation and management of the PostgreSQL database server. By default, this role also optimizes PostgreSQL by automatically configuring performance-related settings in the PostgreSQL service configuration files. 4.8.1. Configuring PostgreSQL with an existing TLS certificate by using the postgresql RHEL system role If your application requires a PostgreSQL database server, you can configure this service with TLS encryption to enable secure communication between the application and the database. 
By using the postgresql RHEL system role, you can automate this process and remotely install and configure PostgreSQL with TLS encryption. In the playbook, you can use an existing private key and a TLS certificate that was issued by a certificate authority (CA). Note The postgresql role cannot open ports in the firewalld service. To allow remote access to the PostgreSQL server, add a task that uses the firewall RHEL system role to your playbook. Prerequisites You have prepared the control node and the managed nodes You are logged in to the control node as a user who can run playbooks on the managed nodes. The account you use to connect to the managed nodes has sudo permissions on them. Both the private key of the managed node and the certificate are stored on the control node in the following files: Private key: ~/ <FQDN_of_the_managed_node> .key Certificate: ~/ <FQDN_of_the_managed_node> .crt Procedure Store your sensitive variables in an encrypted file: Create the vault: After the ansible-vault create command opens an editor, enter the sensitive data in the <key> : <value> format: pwd: <password> Save the changes, and close the editor. Ansible encrypts the data in the vault. Create a playbook file, for example ~/playbook.yml , with the following content: --- - name: Installing and configuring PostgreSQL hosts: managed-node-01.example.com vars_files: - vault.yml tasks: - name: Create directory for TLS certificate and key ansible.builtin.file: path: /etc/postgresql/ state: directory mode: 755 - name: Copy CA certificate ansible.builtin.copy: src: "~/{{ inventory_hostname }}.crt" dest: "/etc/postgresql/server.crt" - name: Copy private key ansible.builtin.copy: src: "~/{{ inventory_hostname }}.key" dest: "/etc/postgresql/server.key" mode: 0600 - name: PostgreSQL with an existing private key and certificate ansible.builtin.include_role: name: rhel-system-roles.postgresql vars: postgresql_version: "16" postgresql_password: "{{ pwd }}" postgresql_ssl_enable: true postgresql_cert_name: "/etc/postgresql/server" postgresql_server_conf: listen_addresses: "'*'" password_encryption: scram-sha-256 postgresql_pg_hba_conf: - type: local database: all user: all auth_method: scram-sha-256 - type: hostssl database: all user: all address: '127.0.0.1/32' auth_method: scram-sha-256 - type: hostssl database: all user: all address: '::1/128' auth_method: scram-sha-256 - type: hostssl database: all user: all address: '192.0.2.0/24' auth_method: scram-sha-256 - name: Open the PostgresQL port in firewalld ansible.builtin.include_role: name: rhel-system-roles.firewall vars: firewall: - service: postgresql state: enabled The settings specified in the example playbook include the following: postgresql_version: <version> Sets the version of PostgreSQL to install. The version you can set depends on the PostgreSQL versions that are available in Red Hat Enterprise Linux running on the managed node. You cannot upgrade or downgrade PostgreSQL by changing the postgresql_version variable and running the playbook again. postgresql_password: <password> Sets the password of the postgres database superuser. You cannot change the password by changing the postgresql_password variable and running the playbook again. postgresql_cert_name: <private_key_and_certificate_file> Defines the path and base name of both the certificate and private key on the managed node without .crt and key suffixes. During the PostgreSQL configuration, the role creates symbolic links in the /var/lib/pgsql/data/ directory that refer to these files. 
The certificate and private key must exist locally on the managed node. You can use tasks with the ansible.builtin.copy module to transfer the files from the control node to the managed node, as shown in the playbook. postgresql_server_conf: <list_of_settings> Defines postgresql.conf settings the role should set. The role adds these settings to the /etc/postgresql/system-roles.conf file and includes this file at the end of /var/lib/pgsql/data/postgresql.conf . Consequently, settings from the postgresql_server_conf variable override settings in /var/lib/pgsql/data/postgresql.conf . Re-running the playbook with different settings in postgresql_server_conf overwrites the /etc/postgresql/system-roles.conf file with the new settings. postgresql_pg_hba_conf: <list_of_authentication_entries> Configures client authentication entries in the /var/lib/pgsql/data/pg_hba.conf file. For details, see see the PostgreSQL documentation. The example allows the following connections to PostgreSQL: Unencrypted connections by using local UNIX domain sockets. TLS-encrypted connections to the IPv4 and IPv6 localhost addresses. TLS-encrypted connections from the 192.0.2.0/24 subnet. Note that access from remote addresses is only possible if you also configure the listen_addresses setting in the postgresql_server_conf variable appropriately. Re-running the playbook with different settings in postgresql_pg_hba_conf overwrites the /var/lib/pgsql/data/pg_hba.conf file with the new settings. For details about all variables used in the playbook, see the /usr/share/ansible/roles/rhel-system-roles.postgresql/README.md file on the control node. Validate the playbook syntax: Note that this command only validates the syntax and does not protect against a wrong but valid configuration. Run the playbook: Verification Use the postgres super user to connect to a PostgreSQL server and execute the \conninfo meta command: If the output displays a TLS protocol version and cipher details, the connection works and TLS encryption is enabled. Additional resources /usr/share/ansible/roles/rhel-system-roles.postgresql/README.md file /usr/share/doc/rhel-system-roles/postgresql/ directory Ansible vault 4.8.2. Configuring PostgreSQL with a TLS certificate issued from IdM by using the postgresql RHEL system role If your application requires a PostgreSQL database server, you can configure the PostgreSQL service with TLS encryption to enable secure communication between the application and the database. If the PostgreSQL host is a member of a Red Hat Enterprise Linux Identity Management (IdM) domain, the certmonger service can manage the certificate request and future renewals. By using the postgresql RHEL system role, you can automate this process. You can remotely install and configure PostgreSQL with TLS encryption, and the postgresql role uses the certificate RHEL system role to configure certmonger and request a certificate from IdM. Note The postgresql role cannot open ports in the firewalld service. To allow remote access to the PostgreSQL server, add a task to your playbook that uses the firewall RHEL system role. Prerequisites You have prepared the control node and the managed nodes You are logged in to the control node as a user who can run playbooks on the managed nodes. The account you use to connect to the managed nodes has sudo permissions on them. You enrolled the managed node in an IdM domain. 
Procedure Store your sensitive variables in an encrypted file: Create the vault: After the ansible-vault create command opens an editor, enter the sensitive data in the <key> : <value> format: pwd: <password> Save the changes, and close the editor. Ansible encrypts the data in the vault. Create a playbook file, for example ~/playbook.yml , with the following content: --- - name: Installing and configuring PostgreSQL hosts: managed-node-01.example.com vars_files: - vault.yml tasks: - name: PostgreSQL with certificates issued by IdM ansible.builtin.include_role: name: rhel-system-roles.postgresql vars: postgresql_version: "16" postgresql_password: "{{ pwd }}" postgresql_ssl_enable: true postgresql_certificates: - name: postgresql_cert dns: "{{ inventory_hostname }}" ca: ipa principal: "postgresql/{{ inventory_hostname }}@EXAMPLE.COM" postgresql_server_conf: listen_addresses: "'*'" password_encryption: scram-sha-256 postgresql_pg_hba_conf: - type: local database: all user: all auth_method: scram-sha-256 - type: hostssl database: all user: all address: '127.0.0.1/32' auth_method: scram-sha-256 - type: hostssl database: all user: all address: '::1/128' auth_method: scram-sha-256 - type: hostssl database: all user: all address: '192.0.2.0/24' auth_method: scram-sha-256 - name: Open the PostgresQL port in firewalld ansible.builtin.include_role: name: rhel-system-roles.firewall vars: firewall: - service: postgresql state: enabled The settings specified in the example playbook include the following: postgresql_version: <version> Sets the version of PostgreSQL to install. The version you can set depends on the PostgreSQL versions that are available in Red Hat Enterprise Linux running on the managed node. You cannot upgrade or downgrade PostgreSQL by changing the postgresql_version variable and running the playbook again. postgresql_password: <password> Sets the password of the postgres database superuser. You cannot change the password by changing the postgresql_password variable and running the playbook again. postgresql_certificates: <certificate_role_settings> A list of YAML dictionaries with settings for the certificate role. postgresql_server_conf: <list_of_settings> Defines postgresql.conf settings you want the role to set. The role adds these settings to the /etc/postgresql/system-roles.conf file and includes this file at the end of /var/lib/pgsql/data/postgresql.conf . Consequently, settings from the postgresql_server_conf variable override settings in /var/lib/pgsql/data/postgresql.conf . Re-running the playbook with different settings in postgresql_server_conf overwrites the /etc/postgresql/system-roles.conf file with the new settings. postgresql_pg_hba_conf: <list_of_authentication_entries> Configures client authentication entries in the /var/lib/pgsql/data/pg_hba.conf file. For details, see see the PostgreSQL documentation. The example allows the following connections to PostgreSQL: Unencrypted connections by using local UNIX domain sockets. TLS-encrypted connections to the IPv4 and IPv6 localhost addresses. TLS-encrypted connections from the 192.0.2.0/24 subnet. Note that access from remote addresses is only possible if you also configure the listen_addresses setting in the postgresql_server_conf variable appropriately. Re-running the playbook with different settings in postgresql_pg_hba_conf overwrites the /var/lib/pgsql/data/pg_hba.conf file with the new settings. 
For details about all variables used in the playbook, see the /usr/share/ansible/roles/rhel-system-roles.postgresql/README.md file on the control node. Validate the playbook syntax: Note that this command only validates the syntax and does not protect against a wrong but valid configuration. Run the playbook: Verification Use the postgres super user to connect to a PostgreSQL server and execute the \conninfo meta command: If the output displays a TLS protocol version and cipher details, the connection works and TLS encryption is enabled. Additional resources /usr/share/ansible/roles/rhel-system-roles.postgresql/README.md file /usr/share/doc/rhel-system-roles/postgresql/ directory Ansible vault
[ "dnf install postgresql-server", "dnf module install postgresql:16/server", "postgresql-setup --initdb", "systemctl start postgresql.service", "systemctl enable postgresql.service", "podman login registry.redhat.io", "podman run -d --name <container_name> -e POSTGRESQL_USER= <user_name> -e POSTGRESQL_PASSWORD= <password> -e POSTGRESQL_DATABASE= <database_name> -p <host_port_1> :5432 rhel9/postgresql-13", "podman run -d --name <container_name> -e POSTGRESQL_USER= <user_name> -e POSTGRESQL_PASSWORD= <password> -e POSTGRESQL_DATABASE= <database_name> -p <host_port_2> :5432 rhel9/postgresql-15", "podman run -d --name <container_name> -e POSTGRESQL_USER= <user_name> -e POSTGRESQL_PASSWORD= <password> -e POSTGRESQL_DATABASE= <database_name> -p <host_port_3> :5432 rhel9/postgresql-16", "firewall-cmd --permanent --add-port={ <host_port_1> /tcp, <host_port_2> /tcp, <host_port_3> /tcp,...} firewall-cmd --reload", "podman ps", "psql -u postgres -p -h localhost -P <host_port> --protocol tcp", "postgres=# CREATE USER mydbuser WITH PASSWORD ' mypasswd ' CREATEROLE CREATEDB;", "dnf install postgresql-server", "postgresql-setup --initdb * Initializing database in '/var/lib/pgsql/data' * Initialized, logs are in /var/lib/pgsql/initdb_postgresql.log", "#password_encryption = md5 # md5 or scram-sha-256", "password_encryption = scram-sha-256", "host all all 127.0.0.1/32 ident", "host all all 127.0.0.1/32 scram-sha-256", "systemctl start postgresql.service", "su - postgres", "psql psql (13.7) Type \"help\" for help. postgres=#", "postgres=# \\conninfo You are connected to database \"postgres\" as user \"postgres\" via socket in \"/var/run/postgresql\" at port \"5432\".", "postgres=# CREATE USER mydbuser WITH PASSWORD 'mypasswd' CREATEROLE CREATEDB; CREATE ROLE", "postgres=# \\q", "logout", "psql -U mydbuser -h 127.0.0.1 -d postgres Password for user mydbuser: Type the password. psql (13.7) Type \"help\" for help. postgres=>", "postgres=> CREATE DATABASE mydatabase; CREATE DATABASE postgres=>", "postgres=# \\q", "psql -U mydbuser -h 127.0.0.1 -d mydatabase Password for user mydbuser: psql (13.7) Type \"help\" for help. mydatabase=>", "mydatabase=> \\conninfo You are connected to database \"mydatabase\" as user \"mydbuser\" on host \"127.0.0.1\" at port \"5432\".", "systemctl restart postgresql.service", "This is a comment log_connections = yes log_destination = 'syslog' search_path = '\"USDuser\", public' shared_buffers = 128MB password_encryption = scram-sha-256", "TYPE DATABASE USER ADDRESS METHOD local all all trust host postgres all 192.168.93.0/24 ident host all all .example.com scram-sha-256", "dnf install openssl", "openssl req -new -x509 -days 365 -nodes -text -out server.crt -keyout server.key -subj \"/CN= dbhost.yourdomain.com \"", "cp server.{key,crt} /var/lib/pgsql/data/.", "chown postgres:postgres /var/lib/pgsql/data/server.{key,crt}", "chmod 0400 /var/lib/pgsql/data/server.key", "#password_encryption = md5 # md5 or scram-sha-256", "password_encryption = scram-sha-256", "#ssl = off", "ssl=on", "host all all 127.0.0.1/32 ident", "hostssl all all 127.0.0.1/32 scram-sha-256", "hostssl mydatabase mydbuser 127.0.0.1/32 scram-sha-256", "systemctl restart postgresql.service", "psql -U mydbuser -h 127.0.0.1 -d mydatabase Password for user mydbuser :", "mydbuser=> \\conninfo You are connected to database \"mydatabase\" as user \"mydbuser\" on host \"127.0.0.1\" at port \"5432\". 
SSL connection (protocol: TLSv1.3, cipher: TLS_AES_256_GCM_SHA384, bits: 256, compression: off)", "#include <stdio.h> #include <stdlib.h> #include <libpq-fe.h> int main(int argc, char* argv[]) { //Create connection PGconn* connection = PQconnectdb(\"hostaddr=127.0.0.1 password= mypassword port=5432 dbname= mydatabase user= mydbuser \"); if (PQstatus(connection) ==CONNECTION_BAD) { printf(\"Connection error\\n\"); PQfinish(connection); return -1; //Execution of the program will stop here } printf(\"Connection ok\\n\"); //Verify TLS if (PQsslInUse(connection)){ printf(\"TLS in use\\n\"); printf(\"%s\\n\", PQsslAttribute(connection,\"protocol\")); } //End connection PQfinish(connection); printf(\"Disconnected\\n\"); return 0; }", "gcc source_file.c -lpq -o myapplication", "dnf install postgresql-server", "postgresql-setup --initdb * Initializing database in '/var/lib/pgsql/data' * Initialized, logs are in /var/lib/pgsql/initdb_postgresql.log", "dnf install openssl", "openssl req -new -x509 -days 365 -nodes -text -out server.crt -keyout server.key -subj \"/CN= dbhost.yourdomain.com \"", "cp server.{key,crt} /var/lib/pgsql/data/.", "chown postgres:postgres /var/lib/pgsql/data/server.{key,crt}", "chmod 0400 /var/lib/pgsql/data/server.key", "#password_encryption = md5 # md5 or scram-sha-256", "password_encryption = scram-sha-256", "#ssl = off", "ssl=on", "systemctl start postgresql.service", "su - postgres", "psql -U postgres psql (13.7) Type \"help\" for help. postgres=#", "postgres=# CREATE USER mydbuser WITH PASSWORD 'mypasswd'; CREATE ROLE postgres=#", "postgres=# CREATE DATABASE mydatabase; CREATE DATABASE postgres=#", "postgres=# GRANT ALL PRIVILEGES ON DATABASE mydatabase TO mydbuser; GRANT postgres=#", "postgres=# \\q", "logout", "host all all 127.0.0.1/32 ident", "hostssl all all 127.0.0.1/32 scram-sha-256", "systemctl restart postgresql.service", "psql -U mydbuser -h 127.0.0.1 -d mydatabase Password for user mydbuser: psql (13.7) SSL connection (protocol: TLSv1.3, cipher: TLS_AES_256_GCM_SHA384, bits: 256, compression: off) Type \"help\" for help. mydatabase=>", "pg_dump dbname > dumpfile", "pg_dumpall > dumpfile", "createdb dbname", "psql dbname < dumpfile", "pg_restore non-plain-text-file", "psql < dumpfile", "pg_dump -h host1 dbname | psql -h host2 dbname", "psql --set ON_ERROR_STOP=on dbname < dumpfile", "psql -1", "pg_restore -e", "postgresql-setup --initdb", "systemctl stop postgresql.service", "tar -cf backup.tar /var/lib/pgsql/data/", "systemctl start postgresql.service", "archive_command = 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'", "test ! 
-f /mnt/server/archivedir/00000001000000A900000065 && cp pg_wal/00000001000000A900000065 /mnt/server/archivedir/00000001000000A900000065", "systemctl restart postgresql.service", "pg_basebackup -D backup_directory -Fp", "pg_basebackup -D backup_directory -Ft -z", "systemctl stop postgresql.service", "restore_command = 'cp /mnt/server/archivedir/%f \"%p\"'", "systemctl start postgresql.service", "postgres=# CREATE USER mydbuser; postgres=# \\c postgres mydbuser postgres=USD CREATE TABLE mytable (id int);", "postgres=# CREATE USER mydbuser; postgres=# GRANT ALL ON SCHEMA public TO mydbuser; postgres=# \\c postgres mydbuser postgres=USD CREATE TABLE mytable (id int);", "dnf install postgresql-server postgresql-upgrade", "systemctl stop postgresql.service", "postgresql-setup --upgrade", "systemctl start postgresql.service", "su postgres -c '~/analyze_new_cluster.sh'", "su postgres -c 'vacuumdb --all --analyze-in-stages'", "systemctl enable postgresql.service", "systemctl start postgresql.service", "su - postgres -c \"pg_dumpall > ~/pgdump_file.sql\"", "su - postgres -c 'less \"USDHOME/pgdump_file.sql\"'", "dnf install postgresql-server", "postgresql-setup --initdb", "su - postgres -c 'test -e \"USDHOME/pgdump_file.sql\" && echo exists'", "su - postgres -c 'ls -1 USDPGDATA/*.conf'", "systemctl start postgresql.service", "su - postgres -c 'psql -f ~/pgdump_file.sql postgres'", "ansible-vault create vault.yml New Vault password: <vault_password> Confirm New Vault password: <vault_password>", "pwd: <password>", "--- - name: Installing and configuring PostgreSQL hosts: managed-node-01.example.com vars_files: - vault.yml tasks: - name: Create directory for TLS certificate and key ansible.builtin.file: path: /etc/postgresql/ state: directory mode: 755 - name: Copy CA certificate ansible.builtin.copy: src: \"~/{{ inventory_hostname }}.crt\" dest: \"/etc/postgresql/server.crt\" - name: Copy private key ansible.builtin.copy: src: \"~/{{ inventory_hostname }}.key\" dest: \"/etc/postgresql/server.key\" mode: 0600 - name: PostgreSQL with an existing private key and certificate ansible.builtin.include_role: name: rhel-system-roles.postgresql vars: postgresql_version: \"16\" postgresql_password: \"{{ pwd }}\" postgresql_ssl_enable: true postgresql_cert_name: \"/etc/postgresql/server\" postgresql_server_conf: listen_addresses: \"'*'\" password_encryption: scram-sha-256 postgresql_pg_hba_conf: - type: local database: all user: all auth_method: scram-sha-256 - type: hostssl database: all user: all address: '127.0.0.1/32' auth_method: scram-sha-256 - type: hostssl database: all user: all address: '::1/128' auth_method: scram-sha-256 - type: hostssl database: all user: all address: '192.0.2.0/24' auth_method: scram-sha-256 - name: Open the PostgresQL port in firewalld ansible.builtin.include_role: name: rhel-system-roles.firewall vars: firewall: - service: postgresql state: enabled", "ansible-playbook --ask-vault-pass --syntax-check ~/playbook.yml", "ansible-playbook --ask-vault-pass ~/playbook.yml", "psql \"postgresql://[email protected]:5432\" -c '\\conninfo' Password for user postgres: You are connected to database \"postgres\" as user \"postgres\" on host \"192.0.2.1\" at port \"5432\". 
SSL connection (protocol: TLSv1.3, cipher: TLS_AES_256_GCM_SHA384, compression: off)", "ansible-vault create vault.yml New Vault password: <vault_password> Confirm New Vault password: <vault_password>", "pwd: <password>", "--- - name: Installing and configuring PostgreSQL hosts: managed-node-01.example.com vars_files: - vault.yml tasks: - name: PostgreSQL with certificates issued by IdM ansible.builtin.include_role: name: rhel-system-roles.postgresql vars: postgresql_version: \"16\" postgresql_password: \"{{ pwd }}\" postgresql_ssl_enable: true postgresql_certificates: - name: postgresql_cert dns: \"{{ inventory_hostname }}\" ca: ipa principal: \"postgresql/{{ inventory_hostname }}@EXAMPLE.COM\" postgresql_server_conf: listen_addresses: \"'*'\" password_encryption: scram-sha-256 postgresql_pg_hba_conf: - type: local database: all user: all auth_method: scram-sha-256 - type: hostssl database: all user: all address: '127.0.0.1/32' auth_method: scram-sha-256 - type: hostssl database: all user: all address: '::1/128' auth_method: scram-sha-256 - type: hostssl database: all user: all address: '192.0.2.0/24' auth_method: scram-sha-256 - name: Open the PostgresQL port in firewalld ansible.builtin.include_role: name: rhel-system-roles.firewall vars: firewall: - service: postgresql state: enabled", "ansible-playbook --ask-vault-pass --syntax-check ~/playbook.yml", "ansible-playbook --ask-vault-pass ~/playbook.yml", "psql \"postgresql://[email protected]:5432\" -c '\\conninfo' Password for user postgres: You are connected to database \"postgres\" as user \"postgres\" on host \"192.0.2.1\" at port \"5432\". SSL connection (protocol: TLSv1.3, cipher: TLS_AES_256_GCM_SHA384, compression: off)" ]
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/9/html/configuring_and_using_database_servers/using-postgresql_configuring-and-using-database-servers
Installing on bare metal
Installing on bare metal OpenShift Container Platform 4.13 Installing OpenShift Container Platform on bare metal Red Hat OpenShift Documentation Team
null
https://docs.redhat.com/en/documentation/openshift_container_platform/4.13/html/installing_on_bare_metal/index
6.3. Converting to an ext3 File System
6.3. Converting to an ext3 File System The tune2fs program can add a journal to an existing ext2 file system without altering the data already on the partition. If the file system is already mounted while it is being transitioned, the journal is visible as the file .journal in the root directory of the file system. If the file system is not mounted, the journal is hidden and does not appear in the file system at all. Note A default installation of Red Hat Enterprise Linux uses ext3 for all file systems. To convert an ext2 file system to ext3, log in as root and type the following command, where <file_system> is an appropriate LVM2 file system. A valid LVM2 file system could be one of two types of entries: A mapped device - A logical volume in a volume group, for example, /dev/mapper/VolGroup00-LogVol02 . A static device - A traditional storage volume, for example, /dev/ hdb X , where hdb is a storage device name and X is the partition number. Issue the df command to display mounted file systems. For more detailed information on the LVM file system, refer to Chapter 8, LVM Configuration . For the remainder of this section, the sample commands use the following value: After doing this, be certain to change the file system type from ext2 to ext3 in the /etc/fstab file. If you are transitioning your root file system, you must use an initrd image (or RAM disk) to boot. To create it, run the mkinitrd program. For information on using the mkinitrd command, type man mkinitrd . Also, make sure your GRUB configuration loads the initrd . If you fail to make this change, the system still boots, but the file system is mounted as ext2 instead of ext3.
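A condensed sketch of the whole conversion follows, assuming the /dev/mapper/VolGroup00-LogVol02 logical volume from the example above and a hypothetical /home mount point; adjust both to your system:

/sbin/tune2fs -j /dev/mapper/VolGroup00-LogVol02
# Update the matching /etc/fstab entry so that its file system type reads ext3, for example:
#   /dev/mapper/VolGroup00-LogVol02  /home  ext3  defaults  1 2
# Only if this is the root file system: rebuild the initrd and confirm that GRUB loads it
/sbin/mkinitrd -f /boot/initrd-$(uname -r).img $(uname -r)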
[ "/sbin/tune2fs -j <file_system>", "/dev/mapper/VolGroup00-LogVol02" ]
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/4/html/system_administration_guide/the_ext3_file_system-converting_to_an_ext3_file_system
Chapter 1. Logging
Chapter 1. Logging 1.1. Viewing Argo CD logs You can view the Argo CD logs with the logging subsystem for Red Hat OpenShift. The logging subsystem visualizes the logs on a Kibana dashboard. The OpenShift Logging Operator enables logging with Argo CD by default. 1.1.1. Storing and retrieving Argo CD logs You can use the Kibana dashboard to store and retrieve Argo CD logs. Prerequisites The Red Hat OpenShift GitOps Operator is installed on your OpenShift Container Platform cluster. The logging subsystem for Red Hat OpenShift is installed with the default configuration on your OpenShift Container Platform cluster. Procedure In the OpenShift Container Platform web console, go to the menu Observability → Logging to view the Kibana dashboard. Create an index pattern. To display all the indices, define the index pattern as * , and click Next step . Select @timestamp for Time Filter field name . Click Create index pattern . In the navigation panel of the Kibana dashboard, click the Discover tab. Create a filter to retrieve logs for Argo CD. The following steps create a filter that retrieves logs for all the pods in the openshift-gitops namespace: Click Add a filter + . Select the kubernetes.namespace_name field. Select the is operator. Select the openshift-gitops value. Click Save . Optional: Add additional filters to narrow the search. For example, to retrieve logs for a particular pod, you can create another filter with kubernetes.pod_name as the field. View the filtered Argo CD logs in the Kibana dashboard. 1.1.2. Additional resources Installing the logging subsystem for Red Hat OpenShift using the web console
null
https://docs.redhat.com/en/documentation/red_hat_openshift_gitops/1.15/html/observability/logging
Chapter 5. Examples and best practices
Chapter 5. Examples and best practices 5.1. Testing the environment Perform the following steps to check if everything is working as expected. Procedure Execute a takeover Change the score of the master nodes to trigger a failover. In this example, the SAPHana clone resource is rsc_SAPHana_HDB_HDB00-clone , and saphdb3 is one node in the second availability zone: This constraint should be removed again with: Otherwise, Pacemaker tries to start HANA on SAPHDB1 . Fence a node You can fence a node with the command: Depending on the other fencing options and the infrastructure used, this node will stay down or come back. Kill HANA You can also kill the database to check if the SAP resource agent is working. As sidadm , you can call: Pacemaker detects this failure and recovers the database. 5.2. Useful aliases 5.2.1. Aliases for user root These aliases are added to ~/.bashrc : 5.2.2. Aliases for the SIDadm user These aliases are added to ~/.customer.sh : 5.3. Monitoring failover example There are many ways to force a takeover. This example forces a takeover without shutting off a node. The SAP resource agents work with scores to decide which node will promote the SAPHana clone resource. The current status is seen using this command: In this example, the SAPHana clone resource is promoted on saphdb1 , so the primary database runs on saphdb1 . The score of this node is 150, and you can adjust the score of the secondary saphdb3 to force Pacemaker to take over the database to the secondary.
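A minimal sketch of this forced takeover and its cleanup, assuming the resource and node names used above; the constraint ID in the last command is an assumption, so check the output of pcs constraint location first:

# Check the current promotion scores
pcs status --full | egrep -e "Node|master|clone_state|roles"
# Prefer saphdb3 for the master role to trigger the takeover
pcs constraint location rsc_SAPHana_HDB_HDB00-clone rule role=master score=100 \#uname eq saphdb3
# After the takeover completes, list the location constraints and remove the temporary rule by its ID (assumed name)
pcs constraint location
pcs constraint remove location-rsc_SAPHana_HDB_HDB00-clone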
[ "pcs constraint location rsc_SAPHana_HDB_HDB00-clone rule role=master score=100 \\#uname eq saphdb3", "pcs constraint remove rsc_SAPHana_HDB_HDB00", "pcs stonith fence <nodename>", "sidadm% HDB kill", "export ListInstances=USD(/usr/sap/hostctrl/exe/saphostctrl -function ListInstances| head -1 ) export sid=USD(echo \"USDListInstances\" |cut -d \" \" -f 5| tr [A-Z] [a-z]) export SID=USD(echo USDsid | tr [a-z] [A-Z]) export Instance=USD(echo \"USDListInstances\" |cut -d \" \" -f 7 ) alias crmm='watch -n 1 crm_mon -1Arf' alias crmv='watch -n 1 /usr/local/bin/crmmv' alias clean=/usr/local/bin/cleanup alias cglo='su - USD{sid}adm -c cglo' alias cdh='cd /usr/lib/ocf/resource.d/heartbeat' alias vhdbinfo=\"vim /usr/sap/USD{SID}/home/hdbinfo;dcp /usr/sap/USD{SID}/home/hdbinfo\" alias gtr='su - USD{sid}adm -c gtr' alias hdb='su - USD{sid}adm -c hdb' alias hdbi='su - USD{sid}adm -c hdbi' alias hgrep='history | grep USD1' alias hri='su - USD{sid}adm -c hri' alias hris='su - USD{sid}adm -c hris' alias killnode=\"echo 'b' > /proc/sysrq-trigger\" alias lhc='su - USD{sid}adm -c lhc' alias python='/usr/sap/USD{SID}/HDBUSD{Instance}/exe/Python/bin/python' alias pss=\"watch 'pcs status --full | egrep -e Node\\|master\\|clone_state\\|roles'\" alias srstate='su - USD{sid}adm -c srstate' alias shr='watch -n 5 \"SAPHanaSR-monitor --sid=USD{SID}\"' alias sgsi='su - USD{sid}adm -c sgsi' alias spl='su - USD{sid}adm -c spl' alias srs='su - USD{sid}adm -c srs' alias sapstart='su - USD{sid}adm -c sapstart' alias sapstop='su - USD{sid}adm -c sapstop' alias sapmode='df -h /;su - USD{sid}adm -c sapmode' alias smm='pcs property set maintenance-mode=true' alias usmm='pcs property set maintenance-mode=false' alias tma='tmux attach -t 0:' alias tmkill='tmux killw -a' alias tm='tail -100f /var/log/messages |grep -v systemd' alias tms='tail -1000f /var/log/messages | egrep -s \"Setting master-rsc_SAPHana_USD{SID}_HDBUSD{Instance}|sr_register\\ *|WAITING4LPA\\|EXCLUDE as posible takeover node|SAPHanaSR|failed|USD{HOSTNAME} |PROMOTED|DEMOTED|UNDEFINED|master_walk|SWAIT|WaitforStopped|FAILED\"' alias tmss='tail -1000f /var/log/messages | grep -v systemd | egrep -s \"secondary with sync status|Settingmaster-rsc_SAPHana_USD{SID}_HDBUSD{Instance} |sr_register|WAITING4LPA|EXCLUDE as posible takeover node|SAPHanaSR |failed|USD{HOSTNAME}|PROMOTED|DEMOTED|UNDEFINED|master_walk|SWAIT|WaitforStopped|FAILED\"' alias tmm='tail -1000f /var/log/messages | egrep -s \"Settingmaster-rsc_SAPHana_USD{SID}_HDBUSD{Instance}|sr_register |WAITING4LPA|PROMOTED|DEMOTED|UNDEFINED|master_walk|SWAIT|W aitforStopped |FAILED|LPT|SOK|SFAIL|SAPHanaSR-mon\"| grep -v systemd' alias tmsl='tail -1000f /var/log/messages | egrep -s \"Settingmaster-rsc_SAPHana_USD{SID}_HDBUSD{Instance}|sr_register|WAITING4LPA |PROMOTED|DEMOTED|UNDEFINED|ERROR|Warning|mast er_walk|SWAIT |WaitforStopped|FAILED|LPT|SOK|SFAIL|SAPHanaSR-mon\"' alias vih='vim /usr/lib/ocf/resource.d/heartbeat/SAPHanaStart' alias switch1='pcs constraint location rsc_SAPHana_HDB_HDB00-clone rule role=master score=100 \\#uname eq saphdb1' alias switch3='pcs constraint location rsc_SAPHana_HDB_HDB00-clone rule role=master score=100 \\#uname eq saphdb3' alias switch0='pcs constraint remove location-rsc_SAPHana_HDB_HDB00-clone alias switchl='pcs constraint location | grep pcs resource | grep promotable | awk \"{ print USD4 }\"` | grep Constraint| awk \"{ print USDNF }\"' alias scl='pcs constraint location |grep \" Constraint\"'", "alias tm='tail -100f /var/log/messages |grep -v systemd' alias tms='tail -1000f 
/var/log/messages | egrep -s \"Settingmaster-rsc_SAPHana_USDSAPSYSTEMNAME_HDBUSD{TINSTANCE}|sr_register |WAITING4LPA|EXCLUDE as posible takeover node|SAPHanaSR|failed |USD{HOSTNAME}|PROMOTED|DEMOTED|UNDEFINED|master_walk|SWAIT|WaitforStopped|FAILED\"' alias tmsl='tail -1000f /var/log/messages | egrep -s \"Settingmaster-rsc_SAPHana_USDSAPSYSTEMNAME_HDBUSD{TINSTANCE}|sr_register |WAITING4LPA|PROMOTED|DEMOTED|UNDEFINED|master_walk|SWAIT|WaitforStopped|FAILED|LPT\"' alias sapstart='sapcontrol -nr USD{TINSTANCE} -function StartSystem HDB;hdbi' alias sapstop='sapcontrol -nr USD{TINSTANCE} -function StopSystem HDB;hdbi' alias sapmode='watch -n 5 \"hdbnsutil -sr_state --sapcontrol=1 |grep site.\\*Mode\"' alias sapprim='hdbnsutil -sr_stateConfiguration| grep -i primary' alias sgsi='watch sapcontrol -nr USD{TINSTANCE} -function GetSystemInstanceList' alias spl='watch sapcontrol -nr USD{TINSTANCE} -function GetProcessList' alias splh='watch \"sapcontrol -nr USD{TINSTANCE} -function GetProcessList | grep hdbdaemon\"' alias srs=\"watch -n 5 'python /usr/sap/USDSAPSYSTEMNAME/HDBUSD{TINSTANCE}/exe/python_support/systemReplicationStatus.py * *; echo Status \\USD?'\" alias cdb=\"cd /usr/sap/USD{SAPSYSTEMNAME}/HDBUSD{TINSTANCE}/backup\" alias srstate='watch -n 10 hdbnsutil -sr_state' alias hdb='watch -n 5 \"sapcontrol -nr USD{TINSTANCE} -function GetProcessList | egrep -s hdbdaemon\\|hdbnameserver\\|hdbindexserver \"' alias hdbi='watch -n 5 \"sapcontrol -nr USD{TINSTANCE} -function GetProcessList | egrep -s hdbdaemon\\|hdbnameserver\\|hdbindexserver ;sapcontrol -nr USD{TINSTANCE} -function GetSystemInstanceList \"' alias hgrep='history | grep USD1' alias vglo=\"vim /usr/sap/USDSAPSYSTEMNAME/SYS/global/hdb/custom/config/global.ini\" alias vgloh=\"vim /hana/shared/USD{SAPSYSTEMNAME}/HDBUSD{TINSTANCE}/USD{HOSTNAME}/global.ini\" alias hri='hdbcons -e hdbindexserver \"replication info\"' alias hris='hdbcons -e hdbindexserver \"replication info\" | egrep -e \"SiteID|ReplicationStatus_\"' alias gtr='watch -n 10 /usr/sap/USDSAPSYSTEMNAME/HDBUSD{TINSTANCE}/exe/Python/bin/python /usr/sap/USDSAPSYSTEMNAME/HDBUSD{TINSTANCE}/exe/python_support/getTakeoverRecommendation.py --sapcontrol=1' alias lhc='/usr/sap/USDSAPSYSTEMNAME/HDBUSD{TINSTANCE}/exe/Python/bin/python /usr/sap/USDSAPSYSTEMNAME/HDBUSD{TINSTANCE}/exe/python_support/landscapeHostConfiguration.py ;echo USD?' 
alias reg1='hdbnsutil -sr_register --remoteHost=hana07 -remoteInstance=USD{TINSTANCE} --replicationMode=syncmem --name=DC3 --remoteName=DC1 --operationMode=logreplay --online' alias reg2='hdbnsutil -sr_register --remoteHost=hana08 -remoteInstance=USD{TINSTANCE} --replicationMode=syncmem --name=DC3 --remoteName=DC2 --operationMode=logreplay --online' alias reg3='hdbnsutil -sr_register --remoteHost=hana09 -remoteInstance=USD{TINSTANCE} --replicationMode=syncmem --name=DC3 --remoteName=DC3 --operationMode=logreplay --online' PS1=\"\\[\\033[m\\][\\[\\e[1;33m\\]\\u\\[\\e[1;33m\\]\\[\\033[m\\]@\\[\\e[1;36m\\]\\h\\[\\033[m\\]: \\[\\e[0m\\]\\[\\e[1;32m\\]\\W\\[\\e[0m\\]]# \"", "alias pss='pcs status --full | egrep -e \"Node|master|clone_state|roles\"' [root@saphdb2:~]# pss Node List: Node Attributes: * Node: saphdb1 (1): * hana_hdb_clone_state : PROMOTED * hana_hdb_roles : master1:master:worker:master * master-rsc_SAPHana_HDB_HDB00 : 150 * Node: saphdb2 (2): * hana_hdb_clone_state : DEMOTED * hana_hdb_roles : slave:slave:worker:slave * master-rsc_SAPHana_HDB_HDB00 : -10000 * Node: saphdb3 (3): * hana_hdb_clone_state : DEMOTED * hana_hdb_roles : master1:master:worker:master * master-rsc_SAPHana_HDB_HDB00 : 100 * Node: saphdb4 (4): * hana_hdb_clone_state : DEMOTED * hana_hdb_roles : slave:slave:worker:slave * master-rsc_SAPHana_HDB_HDB00 : -12200" ]
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux_for_sap_solutions/8/html/automating_sap_hana_scale-out_system_replication_using_the_rhel_ha_add-on/asmb_ex_automating-sap-hana-scale-out
14.6. Editing a Guest Virtual Machine's configuration file
14.6. Editing a Guest Virtual Machine's configuration file Instead of using the dumpxml option (refer to Section 14.5.23, "Creating a Virtual Machine XML Dump (Configuration File)" ), guest virtual machines can be edited either while they are running or while they are offline. The virsh edit command provides this functionality. For example, to edit the guest virtual machine named rhel6 : This opens a text editor. The default text editor is determined by the $EDITOR shell parameter (set to vi by default). 14.6.1. Adding Multifunction PCI Devices to KVM Guest Virtual Machines This section demonstrates how to add multifunction PCI devices to KVM guest virtual machines. Run the virsh edit [guestname] command to edit the XML configuration file for the guest virtual machine. In the address type tag, add a multifunction='on' entry for function='0x0' . This enables the guest virtual machine to use the multifunction PCI devices. For a PCI device with two functions, amend the XML configuration file to include a second device with the same slot number as the first device and a different function number, such as function='0x1' . For example: lspci output from the KVM guest virtual machine shows:
[ "virsh edit rhel6", "<disk type='file' device='disk'> <driver name='qemu' type='raw' cache='none'/> <source file='/var/lib/libvirt/images/rhel62-1.img'/> <target dev='vda' bus='virtio'/> <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0' multifunction='on'/ </disk>", "<disk type='file' device='disk'> <driver name='qemu' type='raw' cache='none'/> <source file='/var/lib/libvirt/images/rhel62-1.img'/> <target dev='vda' bus='virtio'/> <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0' multifunction='on'/> </disk> <disk type='file' device='disk'> <driver name='qemu' type='raw' cache='none'/> <source file='/var/lib/libvirt/images/rhel62-2.img'/> <target dev='vdb' bus='virtio'/> <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x1'/> </disk>", "lspci 00:05.0 SCSI storage controller: Red Hat, Inc Virtio block device 00:05.1 SCSI storage controller: Red Hat, Inc Virtio block device" ]
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/virtualization_administration_guide/sect-Managing_guest_virtual_machines_with_virsh-Editing_a_guest_virtual_machines_configuration_file
Chapter 2. Managing Puppet modules
Chapter 2. Managing Puppet modules 2.1. Installing a Puppet module on Satellite server You can install a pre-built Puppet module from the Puppet Forge. The Puppet Forge is a repository that provides Puppet modules contributed by the community. Puppet modules flagged as supported are officially supported and tested by Puppet Inc. This example shows how to add the ntp module to hosts. Procedure Navigate to forge.puppet.com and search for ntp . One of the first modules is puppetlabs/ntp . Connect to your Satellite Server using SSH and install the Puppet module: Use the -i parameter to specify the path and Puppet environment, for example production . When the installation is complete, the output looks as follows: Alternatively, you can install a Puppet module by copying a directory that contains the module into the module path mentioned above. In that case, ensure that you resolve its dependencies manually. 2.2. Updating a Puppet module You can update an existing Puppet module using the puppet command. Procedure Connect to your Puppet server using SSH and find out where the Puppet modules are located: This returns output similar to the following: If the module is located in the path as displayed above, the following command updates a module:
[ "puppet module install puppetlabs-ntp -i /etc/puppetlabs/code/environments/production/modules", "Notice: Preparing to install into /etc/puppetlabs/code/environments/production/modules Notice: Created target directory /etc/puppetlabs/code/environments/production/modules Notice: Downloading from https://forgeapi.puppet.com Notice: Installing -- do not interrupt /etc/puppetlabs/code/environments/production/modules |-| puppetlabs-ntp (v8.3.0) |-- puppetlabs-stdlib (v4.25.1) [/etc/puppetlabs/code/environments/production/modules]", "puppet config print modulepath", "/etc/puppetlabs/code/environments/production/modules:/etc/puppetlabs/code/environments/common:/etc/puppetlabs/code/modules:/opt/puppetlabs/puppet/modules:/usr/share/puppet/modules", "puppet module upgrade module name" ]
https://docs.redhat.com/en/documentation/red_hat_satellite/6.15/html/managing_configurations_using_puppet_integration/managing-puppet-modules_managing-configurations-puppet
Part II. Using the sssd Container
Part II. Using the sssd Container This part covers how to deploy, configure, update and uninstall the SSSD container on Atomic Host. In addition, this documentation explains how to grant or restrict access to SSSD containers and how to create and use a centralized Kerberos credential cache.
null
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/7/html/using_containerized_identity_management_services/using-the-sssd-container
Chapter 2. Topic configuration properties
Chapter 2. Topic configuration properties
null
https://docs.redhat.com/en/documentation/red_hat_streams_for_apache_kafka/2.5/html/kafka_configuration_properties/topic-configuration-properties-str
Chapter 8. Executing your content with Automation content navigator
Chapter 8. Executing your content with Automation content navigator Now that you have your automation execution environments built, you can use automation content navigator to validate that the content is run in the same manner as the automation controller will run it. 8.1. Running Ansible playbooks with Automation content navigator As a content creator, you can run your Ansible Playbooks with Automation content navigator and interactively delve into the results of each play and task to verify or troubleshoot the playbook. You can also run your Ansible Playbooks inside an execution environment and without an execution environment to compare and troubleshoot any problems. 8.1.1. Executing a playbook from Automation content navigator You can run Ansible playbooks with the Automation content navigator text-based user interface to follow the execution of the tasks and delve into the results of each task. Prerequisites A playbook. A valid inventory file if not using localhost or an inventory plugin. Procedure Start Automation content navigator USD ansible-navigator Run the playbook. USD :run Optional: type ansible-navigator run simple-playbook.yml -i inventory.yml to run the playbook. Verify or add the inventory and any other command line parameters. INVENTORY OR PLAYBOOK NOT FOUND, PLEASE CONFIRM THE FOLLOWING ───────────────────────────────────────────────────────────────────────── Path to playbook: /home/ansible-navigator_demo/simple_playbook.yml Inventory source: /home/ansible-navigator-demo/inventory.yml Additional command line parameters: Please provide a value (optional) ────────────────────────────────────────────────────────────────────────── Submit Cancel Tab to Submit and hit Enter. You should see the tasks executing. Type the number to a play to step into the play results, or type :<number> for numbers above 9. Notice failed tasks show up in red if you have colors enabled for Automation content navigator. Type the number to a task to review the task results, or type :<number> for numbers above 9. Optional: type :doc bring up the documentation for the module or plugin used in the task to aid in troubleshooting. ANSIBLE.BUILTIN.PACKAGE_FACTS (MODULE) 0│--- 1│doc: 2│ author: 3│ - Matthew Jones (@matburt) 4│ - Brian Coca (@bcoca) 5│ - Adam Miller (@maxamillion) 6│ collection: ansible.builtin 7│ description: 8│ - Return information about installed packages as facts. <... output omitted ...> 11│ module: package_facts 12│ notes: 13│ - Supports C(check_mode). 14│ options: 15│ manager: 16│ choices: 17│ - auto 18│ - rpm 19│ - apt 20│ - portage 21│ - pkg 22│ - pacman <... output truncated ...> Additional resources ansible-playbook . Introduction to Ansible playbooks . 8.1.2. Reviewing playbook results with an Automation content navigator artifact file Automation content navigator saves the results of the playbook run in a JSON artifact file. You can use this file to share the playbook results with someone else, save it for security or compliance reasons, or review and troubleshoot later. You only need the artifact file to review the playbook run. You do not need access to the playbook itself or inventory access. Prerequisites A Automation content navigator artifact JSON file from a playbook run. Procedure Start Automation content navigator with the artifact file. USD ansible-navigator replay simple_playbook_artifact.json Review the playbook results that match when the playbook ran. 
You can now type the number to the plays and tasks to step into each to review the results, as you would after executing the playbook. Additional resources ansible-playbook . Introduction to Ansible playbooks .
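As a quick end-to-end illustration of both workflows, using the same illustrative playbook, inventory, and artifact file names as above:

# Run the playbook non-interactively and print plain output to the terminal
ansible-navigator run simple_playbook.yml -i inventory.yml --mode stdout
# Later, replay the recorded artifact interactively to step through plays and tasks
ansible-navigator replay simple_playbook_artifact.json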
[ "ansible-navigator", ":run", "INVENTORY OR PLAYBOOK NOT FOUND, PLEASE CONFIRM THE FOLLOWING ───────────────────────────────────────────────────────────────────────── Path to playbook: /home/ansible-navigator_demo/simple_playbook.yml Inventory source: /home/ansible-navigator-demo/inventory.yml Additional command line parameters: Please provide a value (optional) ────────────────────────────────────────────────────────────────────────── Submit Cancel", "ANSIBLE.BUILTIN.PACKAGE_FACTS (MODULE) 0│--- 1│doc: 2│ author: 3│ - Matthew Jones (@matburt) 4│ - Brian Coca (@bcoca) 5│ - Adam Miller (@maxamillion) 6│ collection: ansible.builtin 7│ description: 8│ - Return information about installed packages as facts. <... output omitted ...> 11│ module: package_facts 12│ notes: 13│ - Supports C(check_mode). 14│ options: 15│ manager: 16│ choices: 17│ - auto 18│ - rpm 19│ - apt 20│ - portage 21│ - pkg 22│ - pacman <... output truncated ...>", "ansible-navigator replay simple_playbook_artifact.json" ]
https://docs.redhat.com/en/documentation/red_hat_ansible_automation_platform/2.3/html/red_hat_ansible_automation_platform_creator_guide/executing-content-navigator
Chapter 15. Configuring Language and Installation Source
Chapter 15. Configuring Language and Installation Source Before the graphical installation program starts, you need to configure the language and installation source. 15.1. The Text Mode Installation Program User Interface Important We recommend that you install Red Hat Enterprise Linux using the graphical interface. If you are installing Red Hat Enterprise Linux on a system that lacks a graphical display, consider performing the installation over a VNC connection - see Chapter 31, Installing Through VNC . If anaconda detects that you are installing in text mode on a system where installation over a VNC connection might be possible, anaconda asks you to verify your decision to install in text mode even though your options during installation are limited. If your system has a graphical display, but graphical installation fails, try booting with the xdriver=vesa option - refer to Chapter 28, Boot Options Both the loader and later anaconda use a screen-based interface that includes most of the on-screen widgets commonly found on graphical user interfaces. Figure 15.1, "Installation Program Widgets as seen in URL Setup " , and Figure 15.2, "Installation Program Widgets as seen in Choose a Language " , illustrate widgets that appear on screens during the installation process. Figure 15.1. Installation Program Widgets as seen in URL Setup Figure 15.2. Installation Program Widgets as seen in Choose a Language The widgets include: Window - Windows (usually referred to as dialogs in this manual) appear on your screen throughout the installation process. At times, one window may overlay another; in these cases, you can only interact with the window on top. When you are finished in that window, it disappears, allowing you to continue working in the window underneath. Checkbox - Checkboxes allow you to select or deselect a feature. The box displays either an asterisk (selected) or a space (unselected). When the cursor is within a checkbox, press Space to select or deselect a feature. Text Input - Text input lines are regions where you can enter information required by the installation program. When the cursor rests on a text input line, you may enter and/or edit information on that line. Text Widget - Text widgets are regions of the screen for the display of text. At times, text widgets may also contain other widgets, such as checkboxes. If a text widget contains more information than can be displayed in the space reserved for it, a scroll bar appears; if you position the cursor within the text widget, you can then use the Up and Down arrow keys to scroll through all the information available. Your current position is shown on the scroll bar by a # character, which moves up and down the scroll bar as you scroll. Scroll Bar - Scroll bars appear on the side or bottom of a window to control which part of a list or document is currently in the window's frame. The scroll bar makes it easy to move to any part of a file. Button Widget - Button widgets are the primary method of interacting with the installation program. You progress through the windows of the installation program by navigating these buttons, using the Tab and Enter keys. Buttons can be selected when they are highlighted. Cursor - Although not a widget, the cursor is used to select (and interact with) a particular widget. As the cursor is moved from widget to widget, it may cause the widget to change color, or the cursor itself may only appear positioned in or to the widget. 
In Figure 15.1, "Installation Program Widgets as seen in URL Setup " , the cursor is positioned on the Enable HTTP proxy checkbox. Figure 15.2, "Installation Program Widgets as seen in Choose a Language " , shows the cursor on the OK button. 15.1.1. Using the Keyboard to Navigate Navigation through the installation dialogs is performed through a simple set of keystrokes. To move the cursor, use the Left , Right , Up , and Down arrow keys. Use Tab and Shift - Tab to cycle forward or backward through each widget on the screen. Along the bottom, most screens display a summary of available cursor positioning keys. To "press" a button, position the cursor over the button (using Tab , for example) and press Space or Enter . To select an item from a list of items, move the cursor to the item you wish to select and press Enter . To select an item with a checkbox, move the cursor to the checkbox and press Space to select an item. To deselect, press Space a second time. Pressing F12 accepts the current values and proceeds to the next dialog; it is equivalent to pressing the OK button. Warning Unless a dialog box is waiting for your input, do not press any keys during the installation process (doing so may result in unpredictable behavior).
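For example, to try the xdriver=vesa workaround mentioned at the start of this chapter, you typically append the option to the installer boot command. The following is only a sketch; the exact prompt and menu depend on how you boot the installation media (see Chapter 28, Boot Options):

boot: linux xdriver=vesa        (force the VESA graphics driver for the graphical installer)
boot: linux text                (force a text mode installation)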
null
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/installation_guide/ch-Installation_Phase_2-ppc
Chapter 5. Observability UI plugins
Chapter 5. Observability UI plugins 5.1. Observability UI plugins overview You can use the Cluster Observability Operator (COO) to install and manage UI plugins to enhance the observability capabilities of the OpenShift Container Platform web console. The plugins extend the default functionality, providing new UI features for troubleshooting, distributed tracing, and cluster logging. 5.1.1. Cluster logging The logging UI plugin surfaces logging data in the web console on the Observe Logs page. You can specify filters, queries, time ranges and refresh rates. The results are displayed as a list of collapsed logs, which can then be expanded to show more detailed information for each log. For more information, see the logging UI plugin page. 5.1.2. Troubleshooting Important The Cluster Observability Operator troubleshooting panel UI plugin is a Technology Preview feature only. Technology Preview features are not supported with Red Hat production service level agreements (SLAs) and might not be functionally complete. Red Hat does not recommend using them in production. These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process. For more information about the support scope of Red Hat Technology Preview features, see Technology Preview Features Support Scope . The troubleshooting panel UI plugin for OpenShift Container Platform version 4.16+ provides observability signal correlation, powered by the open source Korrel8r project. You can use the troubleshooting panel available from the Observe Alerting page to easily correlate metrics, logs, alerts, netflows, and additional observability signals and resources, across different data stores. Users of OpenShift Container Platform version 4.17+ can also access the troubleshooting UI panel from the Application Launcher . The output of Korrel8r is displayed as an interactive node graph. When you click on a node, you are automatically redirected to the corresponding web console page with the specific information for that node, for example, metric, log, or pod. For more information, see the troubleshooting UI plugin page. 5.1.3. Distributed tracing Important The Cluster Observability Operator distributed tracing UI plugin is a Technology Preview feature only. Technology Preview features are not supported with Red Hat production service level agreements (SLAs) and might not be functionally complete. Red Hat does not recommend using them in production. These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process. For more information about the support scope of Red Hat Technology Preview features, see Technology Preview Features Support Scope . The distributed tracing UI plugin adds tracing-related features to the web console on the Observe Traces page. You can follow requests through the front end and into the backend of microservices, helping you identify code errors and performance bottlenecks in distributed systems. You can select a supported TempoStack or TempoMonolithic multi-tenant instance running in the cluster and set a time range and query to view the trace data. For more information, see the distributed tracing UI plugin page. 5.2. Logging UI plugin The logging UI plugin surfaces logging data in the OpenShift Container Platform web console on the Observe Logs page. 
You can specify filters, queries, time ranges and refresh rates, with the results displayed as a list of collapsed logs, which can then be expanded to show more detailed information for each log. When you have also deployed the Troubleshooting UI plugin on OpenShift Container Platform version 4.16+, it connects to the Korrel8r service and adds direct links from the Administration perspective, from the Observe Logs page, to the Observe Metrics page with a correlated PromQL query. It also adds a See Related Logs link from the Administration perspective alerting detail page, at Observe Alerting , to the Observe Logs page with a correlated filter set selected. The features of the plugin are categorized as: dev-console Adds the logging view to the Developer perspective. alerts Merges the web console alerts with log-based alerts defined in the Loki ruler. Adds a log-based metrics chart in the alert detail view. dev-alerts Merges the web console alerts with log-based alerts defined in the Loki ruler. Adds a log-based metrics chart in the alert detail view for the Developer perspective. For Cluster Observability Operator (COO) versions, the support for these features in OpenShift Container Platform versions is shown in the following table: COO version OCP versions Features 0.3.0+ 4.12 dev-console 0.3.0+ 4.13 dev-console , alerts 0.3.0+ 4.14+ dev-console , alerts , dev-alerts 5.2.1. Installing the Cluster Observability Operator logging UI plugin Prerequisites You have access to the cluster as a user with the cluster-admin role. You have logged in to the OpenShift Container Platform web console. You have installed the Cluster Observability Operator. You have a LokiStack instance in your cluster. Procedure In the OpenShift Container Platform web console, click Operators Installed Operators and select Cluster Observability Operator. Choose the UI Plugin tab (at the far right of the tab list) and click Create UIPlugin . Select YAML view , enter the following content, and then click Create : apiVersion: observability.openshift.io/v1alpha1 kind: UIPlugin metadata: name: logging spec: type: Logging logging: lokiStack: name: logging-loki logsLimit: 50 timeout: 30s 5.3. Distributed tracing UI plugin Important The Cluster Observability Operator distributed tracing UI plugin is a Technology Preview feature only. Technology Preview features are not supported with Red Hat production service level agreements (SLAs) and might not be functionally complete. Red Hat does not recommend using them in production. These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process. For more information about the support scope of Red Hat Technology Preview features, see Technology Preview Features Support Scope . The distributed tracing UI plugin adds tracing-related features to the Administrator perspective of the OpenShift web console at Observe Traces . You can follow requests through the front end and into the backend of microservices, helping you identify code errors and performance bottlenecks in distributed systems. 5.3.1. Installing the Cluster Observability Operator distributed tracing UI plugin Prerequisites You have access to the cluster as a user with the cluster-admin cluster role. You have logged in to the OpenShift Container Platform web console. 
You have installed the Cluster Observability Operator Procedure In the OpenShift Container Platform web console, click Operators Installed Operators and select Cluster Observability Operator Choose the UI Plugin tab (at the far right of the tab list) and press Create UIPlugin Select YAML view , enter the following content, and then press Create : apiVersion: observability.openshift.io/v1alpha1 kind: UIPlugin metadata: name: distributed-tracing spec: type: DistributedTracing 5.3.2. Using the Cluster Observability Operator distributed tracing UI plugin Prerequisites You have access to the cluster as a user with the cluster-admin cluster role. You have logged in to the OpenShift Container Platform web console. You have installed the Cluster Observability Operator. You have installed the Cluster Observability Operator distributed tracing UI plugin. You have a TempoStack or TempoMonolithic multi-tenant instance in the cluster. Procedure In the Administrator perspective of the OpenShift Container Platform web console, click Observe Traces . Select a TempoStack or TempoMonolithic multi-tenant instance and set a time range and query for the traces to be loaded. The traces are displayed on a scatter-plot showing the trace start time, duration, and number of spans. Underneath the scatter plot, there is a list of traces showing information such as the Trace Name , number of Spans , and Duration . Click on a trace name link. The trace detail page for the selected trace contains a Gantt Chart of all of the spans within the trace. Select a span to show a breakdown of the configured attributes. 5.4. Troubleshooting UI plugin Important The Cluster Observability Operator troubleshooting panel UI plugin is a Technology Preview feature only. Technology Preview features are not supported with Red Hat production service level agreements (SLAs) and might not be functionally complete. Red Hat does not recommend using them in production. These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process. For more information about the support scope of Red Hat Technology Preview features, see Technology Preview Features Support Scope . The troubleshooting UI plugin for OpenShift Container Platform version 4.16+ provides observability signal correlation, powered by the open source Korrel8r project. With the troubleshooting panel that is available under Observe Alerting , you can easily correlate metrics, logs, alerts, netflows, and additional observability signals and resources, across different data stores. Users of OpenShift Container Platform version 4.17+ can also access the troubleshooting UI panel from the Application Launcher . When you install the troubleshooting UI plugin, a Korrel8r service named korrel8r is deployed in the same namespace, and it is able to locate related observability signals and Kubernetes resources from its correlation engine. The output of Korrel8r is displayed in the form of an interactive node graph in the OpenShift Container Platform web console. Nodes in the graph represent a type of resource or signal, while edges represent relationships. When you click on a node, you are automatically redirected to the corresponding web console page with the specific information for that node, for example, metric, log, pod. 5.4.1. Installing the Cluster Observability Operator Troubleshooting UI plugin Prerequisites You have access to the cluster as a user with the cluster-admin cluster role. 
You have logged in to the OpenShift Container Platform web console. You have installed the Cluster Observability Operator. Procedure In the OpenShift Container Platform web console, click Operators Installed Operators and select Cluster Observability Operator. Choose the UI Plugin tab (at the far right of the tab list) and press Create UIPlugin . Select YAML view , enter the following content, and then press Create : apiVersion: observability.openshift.io/v1alpha1 kind: UIPlugin metadata: name: troubleshooting-panel spec: type: TroubleshootingPanel 5.4.2. Using the Cluster Observability Operator troubleshooting UI plugin Prerequisites You have access to the OpenShift Container Platform cluster as a user with the cluster-admin cluster role. If your cluster version is 4.17+, you can access the troubleshooting UI panel from the Application Launcher . You have logged in to the OpenShift Container Platform web console. You have installed OpenShift Container Platform Logging, if you want to visualize correlated logs. You have installed OpenShift Container Platform Network Observability, if you want to visualize correlated netflows. You have installed the Cluster Observability Operator. You have installed the Cluster Observability Operator troubleshooting UI plugin. Note The troubleshooting panel relies on the observability signal stores installed in your cluster. Kubernetes resources, alerts and metrics are always available by default in an OpenShift Container Platform cluster. Other signal types require optional components to be installed: Logs: Red Hat OpenShift Logging (collection) and Loki Operator provided by Red Hat (store) Network events: Network observability provided by Red Hat (collection) and Loki Operator provided by Red Hat (store) Procedure In the Administrator perspective of the web console, navigate to Observe Alerting and then select an alert. If the alert has correlated items, a Troubleshooting Panel link will appear above the chart on the alert detail page. Click on the Troubleshooting Panel link to display the panel. The panel consists of query details and a topology graph of the query results. The selected alert is converted into a Korrel8r query string and sent to the korrel8r service. The results are displayed as a graph network connecting the returned signals and resources. This is a neighbourhood graph, starting at the current resource and including related objects up to 3 steps away from the starting point. Clicking on nodes in the graph takes you to the corresponding web console pages for those resources. You can use the troubleshooting panel to find resources relating to the chosen alert. Note Clicking on a node may sometimes show fewer results than indicated on the graph. This is a known issue that will be addressed in a future release. Alert (1): This node is the starting point in the graph and represents the KubeContainerWaiting alert displayed in the web console. Pod (1): This node indicates that there is a single Pod resource associated with this alert. Clicking on this node will open a console search showing the related pod directly. Event (2): There are two Kubernetes events associated with the pod. Click this node to see the events. Logs (74): This pod has 74 lines of logs, which you can access by clicking on this node. Metrics (105): There are many metrics associated with the pod. Network (6): There are network events, meaning the pod has communicated over the network. 
The remaining nodes in the graph represent the Service , Deployment and DaemonSet resources that the pod has communicated with. Focus: Clicking this button updates the graph. By default, the graph itself does not change when you click on nodes in the graph. Instead, the main web console page changes, and you can then navigate to other resources using links on the page, while the troubleshooting panel itself stays open and unchanged. To force an update to the graph in the troubleshooting panel, click Focus . This draws a new graph, using the current resource in the web console as the starting point. Show Query: Clicking this button enables some experimental features: Hide Query hides the experimental features. The query that identifies the starting point for the graph. The query language, part of the Korrel8r correlation engine used to create the graphs, is experimental and may change in the future. The query is updated by the Focus button to correspond to the resources in the main web console window. Neighbourhood depth is used to display a smaller or larger neighbourhood. Note Setting a large value in a large cluster might cause the query to fail, if the number of results is too big. Goal class results in a goal directed search instead of a neighbourhood search. A goal directed search shows all paths from the starting point to the goal class, which indicates a type of resource or signal. The format of the goal class is experimental and may change. Currently, the following goals are valid: k8s: RESOURCE[VERSION.[GROUP]] identifying a kind of Kubernetes resource. For example k8s:Pod or k8s:Deployment.apps.v1 . alert:alert representing any alert. metric:metric representing any metric. netflow:network representing any network observability network event. log: LOG_TYPE representing stored logs, where LOG_TYPE must be one of application , infrastructure or audit . 5.4.3. Creating the example alert To trigger an alert as a starting point to use in the troubleshooting UI panel, you can deploy a container that is deliberately misconfigured. Procedure Use the following YAML, either from the command line or in the web console, to create a broken deployment in a system namespace: apiVersion: apps/v1 kind: Deployment metadata: name: bad-deployment namespace: default 1 spec: selector: matchLabels: app: bad-deployment template: metadata: labels: app: bad-deployment spec: containers: 2 - name: bad-deployment image: quay.io/openshift-logging/vector:5.8 1 The deployment must be in a system namespace (such as default ) to cause the desired alerts. 2 This container deliberately tries to start a vector server with no configuration file. The server logs a few messages, and then exits with an error. Alternatively, you can deploy any container you like that is badly configured, causing it to trigger an alert. View the alerts: Go to Observe Alerting and click clear all filters . View the Pending alerts. Important Alerts first appear in the Pending state. They do not start Firing until the container has been crashing for some time. By viewing Pending alerts, you do not have to wait as long to see them occur. Choose one of the KubeContainerWaiting , KubePodCrashLooping , or KubePodNotReady alerts and open the troubleshooting panel by clicking on the link. Alternatively, if the panel is already open, click the "Focus" button to update the graph.
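If you prefer to create and clean up this example from the command line instead of the web console YAML editor, the following is a minimal sketch. It assumes the Deployment manifest above has been saved to a local file named bad-deployment.yaml (a hypothetical file name) and that the oc client is already logged in to the cluster; the resulting alert is still viewed on the Observe Alerting page as described above.

# Create the deliberately broken deployment shown above:
oc apply -f bad-deployment.yaml

# Watch the pod crash and enter a waiting state, which eventually raises the
# KubeContainerWaiting / KubePodCrashLooping alerts:
oc get pods -n default -l app=bad-deployment -w

# Remove the deployment when you are done, so that the alert clears:
oc delete deployment bad-deployment -n default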
[ "apiVersion: observability.openshift.io/v1alpha1 kind: UIPlugin metadata: name: logging spec: type: Logging logging: lokiStack: name: logging-loki logsLimit: 50 timeout: 30s", "apiVersion: observability.openshift.io/v1alpha1 kind: UIPlugin metadata: name: distributed-tracing spec: type: DistributedTracing", "apiVersion: observability.openshift.io/v1alpha1 kind: UIPlugin metadata: name: troubleshooting-panel spec: type: TroubleshootingPanel", "apiVersion: apps/v1 kind: Deployment metadata: name: bad-deployment namespace: default 1 spec: selector: matchLabels: app: bad-deployment template: metadata: labels: app: bad-deployment spec: containers: 2 - name: bad-deployment image: quay.io/openshift-logging/vector:5.8" ]
https://docs.redhat.com/en/documentation/openshift_container_platform/4.18/html/cluster_observability_operator/observability-ui-plugins
7.5. Exporting Templates
7.5. Exporting Templates 7.5.1. Migrating Templates to the Export Domain Note The export storage domain is deprecated. Storage data domains can be unattached from a data center and imported to another data center in the same environment, or in a different environment. Virtual machines, floating virtual disks, and templates can then be uploaded from the imported storage domain to the attached data center. See the Importing Existing Storage Domains section in the Red Hat Virtualization Administration Guide for information on importing storage domains. Export templates into the export domain to move them to another data domain, either in the same Red Hat Virtualization environment, or another one. This procedure requires access to the Administration Portal. Exporting Individual Templates to the Export Domain Click Compute Templates and select a template. Click Export . Select the Force Override check box to replace any earlier version of the template on the export domain. Click OK to begin exporting the template; this may take up to an hour, depending on the virtual disk size and your storage hardware. Repeat these steps until the export domain contains all the templates to migrate before you start the import process. Click Storage Domains and select the export domain. Click the domain name to see the details view. Click the Template Import tab to view all exported templates in the export domain. 7.5.2. Copying a Template's Virtual Hard Disk If you are moving a virtual machine that was created from a template with the thin provisioning storage allocation option selected, the template's disks must be copied to the same storage domain as that of the virtual disk. This procedure requires access to the Administration Portal. Copying a Virtual Hard Disk Click Storage Disks . Select the template disk(s) to copy. Click Copy . Select the Target data domain from the drop-down list(s). Click OK . A copy of the template's virtual hard disk has been created, either on the same, or a different, storage domain. If you were copying a template disk in preparation for moving a virtual hard disk, you can now move the virtual hard disk.
null
https://docs.redhat.com/en/documentation/red_hat_virtualization/4.3/html/virtual_machine_management_guide/sect-exporting_templates
Chapter 86. volume
Chapter 86. volume This chapter describes the commands under the volume command. 86.1. volume backup create Create new volume backup Usage: Table 86.1. Positional arguments Value Summary <volume> Volume to backup (name or id) Table 86.2. Command arguments Value Summary -h, --help Show this help message and exit --name <name> Name of the backup --description <description> Description of the backup --container <container> Optional backup container name --snapshot <snapshot> Snapshot to backup (name or id) --force Allow to back up an in-use volume --incremental Perform an incremental backup Table 86.3. Output formatter options Value Summary -f {json,shell,table,value,yaml}, --format {json,shell,table,value,yaml} The output format, defaults to table -c COLUMN, --column COLUMN Specify the column(s) to include, can be repeated Table 86.4. JSON formatter options Value Summary --noindent Whether to disable indenting the json Table 86.5. Shell formatter options Value Summary --prefix PREFIX Add a prefix to all variable names Table 86.6. Table formatter options Value Summary --max-width <integer> Maximum display width, <1 to disable. you can also use the CLIFF_MAX_TERM_WIDTH environment variable, but the parameter takes precedence. --fit-width Fit the table to the display width. implied if --max- width greater than 0. Set the environment variable CLIFF_FIT_WIDTH=1 to always enable --print-empty Print empty table if there is no data to show. 86.2. volume backup delete Delete volume backup(s) Usage: Table 86.7. Positional arguments Value Summary <backup> Backup(s) to delete (name or id) Table 86.8. Command arguments Value Summary -h, --help Show this help message and exit --force Allow delete in state other than error or available 86.3. volume backup list List volume backups Usage: Table 86.9. Command arguments Value Summary -h, --help Show this help message and exit --long List additional fields in output --name <name> Filters results by the backup name --status <status> Filters results by the backup status ( creating , available , deleting , error , restoring or error_restoring ) --volume <volume> Filters results by the volume which they backup (name or ID) --marker <volume-backup> The last backup of the page (name or id) --limit <num-backups> Maximum number of backups to display --all-projects Include all projects (admin only) Table 86.10. Output formatter options Value Summary -f {csv,json,table,value,yaml}, --format {csv,json,table,value,yaml} The output format, defaults to table -c COLUMN, --column COLUMN Specify the column(s) to include, can be repeated --sort-column SORT_COLUMN Specify the column(s) to sort the data (columns specified first have a priority, non-existing columns are ignored), can be repeated Table 86.11. CSV formatter options Value Summary --quote {all,minimal,none,nonnumeric} When to include quotes, defaults to nonnumeric Table 86.12. JSON formatter options Value Summary --noindent Whether to disable indenting the json Table 86.13. Table formatter options Value Summary --max-width <integer> Maximum display width, <1 to disable. you can also use the CLIFF_MAX_TERM_WIDTH environment variable, but the parameter takes precedence. --fit-width Fit the table to the display width. implied if --max- width greater than 0. Set the environment variable CLIFF_FIT_WIDTH=1 to always enable --print-empty Print empty table if there is no data to show. 86.4. volume backup restore Restore volume backup Usage: Table 86.14. 
Positional arguments Value Summary <backup> Backup to restore (name or id) <volume> Volume to restore to (name or id) Table 86.15. Command arguments Value Summary -h, --help Show this help message and exit Table 86.16. Output formatter options Value Summary -f {json,shell,table,value,yaml}, --format {json,shell,table,value,yaml} The output format, defaults to table -c COLUMN, --column COLUMN Specify the column(s) to include, can be repeated Table 86.17. JSON formatter options Value Summary --noindent Whether to disable indenting the json Table 86.18. Shell formatter options Value Summary --prefix PREFIX Add a prefix to all variable names Table 86.19. Table formatter options Value Summary --max-width <integer> Maximum display width, <1 to disable. you can also use the CLIFF_MAX_TERM_WIDTH environment variable, but the parameter takes precedence. --fit-width Fit the table to the display width. implied if --max- width greater than 0. Set the environment variable CLIFF_FIT_WIDTH=1 to always enable --print-empty Print empty table if there is no data to show. 86.5. volume backup set Set volume backup properties Usage: Table 86.20. Positional arguments Value Summary <backup> Backup to modify (name or id) Table 86.21. Command arguments Value Summary -h, --help Show this help message and exit --name <name> New backup name --description <description> New backup description --state <state> New backup state ("available" or "error") (admin only) (This option simply changes the state of the backup in the database with no regard to actual status, exercise caution when using) 86.6. volume backup show Display volume backup details Usage: Table 86.22. Positional arguments Value Summary <backup> Backup to display (name or id) Table 86.23. Command arguments Value Summary -h, --help Show this help message and exit Table 86.24. Output formatter options Value Summary -f {json,shell,table,value,yaml}, --format {json,shell,table,value,yaml} The output format, defaults to table -c COLUMN, --column COLUMN Specify the column(s) to include, can be repeated Table 86.25. JSON formatter options Value Summary --noindent Whether to disable indenting the json Table 86.26. Shell formatter options Value Summary --prefix PREFIX Add a prefix to all variable names Table 86.27. Table formatter options Value Summary --max-width <integer> Maximum display width, <1 to disable. you can also use the CLIFF_MAX_TERM_WIDTH environment variable, but the parameter takes precedence. --fit-width Fit the table to the display width. implied if --max- width greater than 0. Set the environment variable CLIFF_FIT_WIDTH=1 to always enable --print-empty Print empty table if there is no data to show. 86.7. volume create Create new volume Usage: Table 86.28. Positional arguments Value Summary <name> Volume name Table 86.29. 
Command arguments Value Summary -h, --help Show this help message and exit --size <size> Volume size in gb (required unless --snapshot or --source is specified) --type <volume-type> Set the type of volume --image <image> Use <image> as source of volume (name or id) --snapshot <snapshot> Use <snapshot> as source of volume (name or id) --source <volume> Volume to clone (name or id) --description <description> Volume description --availability-zone <availability-zone> Create volume in <availability-zone> --consistency-group consistency-group> Consistency group where the new volume belongs to --property <key=value> Set a property to this volume (repeat option to set multiple properties) --hint <key=value> Arbitrary scheduler hint key-value pairs to help boot an instance (repeat option to set multiple hints) --bootable Mark volume as bootable --non-bootable Mark volume as non-bootable (default) --read-only Set volume to read-only access mode --read-write Set volume to read-write access mode (default) Table 86.30. Output formatter options Value Summary -f {json,shell,table,value,yaml}, --format {json,shell,table,value,yaml} The output format, defaults to table -c COLUMN, --column COLUMN Specify the column(s) to include, can be repeated Table 86.31. JSON formatter options Value Summary --noindent Whether to disable indenting the json Table 86.32. Shell formatter options Value Summary --prefix PREFIX Add a prefix to all variable names Table 86.33. Table formatter options Value Summary --max-width <integer> Maximum display width, <1 to disable. you can also use the CLIFF_MAX_TERM_WIDTH environment variable, but the parameter takes precedence. --fit-width Fit the table to the display width. implied if --max- width greater than 0. Set the environment variable CLIFF_FIT_WIDTH=1 to always enable --print-empty Print empty table if there is no data to show. 86.8. volume delete Delete volume(s) Usage: Table 86.34. Positional arguments Value Summary <volume> Volume(s) to delete (name or id) Table 86.35. Command arguments Value Summary -h, --help Show this help message and exit --force Attempt forced removal of volume(s), regardless of state (defaults to False) --purge Remove any snapshots along with volume(s) (defaults to false) 86.9. volume host set Set volume host properties Usage: Table 86.36. Positional arguments Value Summary <host-name> Name of volume host Table 86.37. Command arguments Value Summary -h, --help Show this help message and exit --disable Freeze and disable the specified volume host --enable Thaw and enable the specified volume host 86.10. volume list List volumes Usage: Table 86.38. Command arguments Value Summary -h, --help Show this help message and exit --project <project> Filter results by project (name or id) (admin only) --project-domain <project-domain> Domain the project belongs to (name or id). this can be used in case collisions between project names exist. --user <user> Filter results by user (name or id) (admin only) --user-domain <user-domain> Domain the user belongs to (name or id). this can be used in case collisions between user names exist. --name <name> Filter results by volume name --status <status> Filter results by status --all-projects Include all projects (admin only) --long List additional fields in output --marker <volume> The last volume id of the page --limit <num-volumes> Maximum number of volumes to display Table 86.39. 
Output formatter options Value Summary -f {csv,json,table,value,yaml}, --format {csv,json,table,value,yaml} The output format, defaults to table -c COLUMN, --column COLUMN Specify the column(s) to include, can be repeated --sort-column SORT_COLUMN Specify the column(s) to sort the data (columns specified first have a priority, non-existing columns are ignored), can be repeated Table 86.40. CSV formatter options Value Summary --quote {all,minimal,none,nonnumeric} When to include quotes, defaults to nonnumeric Table 86.41. JSON formatter options Value Summary --noindent Whether to disable indenting the json Table 86.42. Table formatter options Value Summary --max-width <integer> Maximum display width, <1 to disable. you can also use the CLIFF_MAX_TERM_WIDTH environment variable, but the parameter takes precedence. --fit-width Fit the table to the display width. implied if --max- width greater than 0. Set the environment variable CLIFF_FIT_WIDTH=1 to always enable --print-empty Print empty table if there is no data to show. 86.11. volume migrate Migrate volume to a new host Usage: Table 86.43. Positional arguments Value Summary <volume> Volume to migrate (name or id) Table 86.44. Command arguments Value Summary -h, --help Show this help message and exit --host <host> Destination host (takes the form: host@backend-name#pool) --force-host-copy Enable generic host-based force-migration, which bypasses driver optimizations --lock-volume If specified, the volume state will be locked and will not allow a migration to be aborted (possibly by another operation) 86.12. volume qos associate Associate a QoS specification to a volume type Usage: Table 86.45. Positional arguments Value Summary <qos-spec> Qos specification to modify (name or id) <volume-type> Volume type to associate the qos (name or id) Table 86.46. Command arguments Value Summary -h, --help Show this help message and exit 86.13. volume qos create Create new QoS specification Usage: Table 86.47. Positional arguments Value Summary <name> New qos specification name Table 86.48. Command arguments Value Summary -h, --help Show this help message and exit --consumer <consumer> Consumer of the qos. valid consumers: back-end, both, front-end (defaults to both ) --property <key=value> Set a qos specification property (repeat option to set multiple properties) Table 86.49. Output formatter options Value Summary -f {json,shell,table,value,yaml}, --format {json,shell,table,value,yaml} The output format, defaults to table -c COLUMN, --column COLUMN Specify the column(s) to include, can be repeated Table 86.50. JSON formatter options Value Summary --noindent Whether to disable indenting the json Table 86.51. Shell formatter options Value Summary --prefix PREFIX Add a prefix to all variable names Table 86.52. Table formatter options Value Summary --max-width <integer> Maximum display width, <1 to disable. you can also use the CLIFF_MAX_TERM_WIDTH environment variable, but the parameter takes precedence. --fit-width Fit the table to the display width. implied if --max- width greater than 0. Set the environment variable CLIFF_FIT_WIDTH=1 to always enable --print-empty Print empty table if there is no data to show. 86.14. volume qos delete Delete QoS specification Usage: Table 86.53. Positional arguments Value Summary <qos-spec> Qos specification(s) to delete (name or id) Table 86.54. Command arguments Value Summary -h, --help Show this help message and exit --force Allow to delete in-use qos specification(s) 86.15. 
volume qos disassociate Disassociate a QoS specification from a volume type Usage: Table 86.55. Positional arguments Value Summary <qos-spec> Qos specification to modify (name or id) Table 86.56. Command arguments Value Summary -h, --help Show this help message and exit --volume-type <volume-type> Volume type to disassociate the qos from (name or id) --all Disassociate the qos from every volume type 86.16. volume qos list List QoS specifications Usage: Table 86.57. Command arguments Value Summary -h, --help Show this help message and exit Table 86.58. Output formatter options Value Summary -f {csv,json,table,value,yaml}, --format {csv,json,table,value,yaml} The output format, defaults to table -c COLUMN, --column COLUMN Specify the column(s) to include, can be repeated --sort-column SORT_COLUMN Specify the column(s) to sort the data (columns specified first have a priority, non-existing columns are ignored), can be repeated Table 86.59. CSV formatter options Value Summary --quote {all,minimal,none,nonnumeric} When to include quotes, defaults to nonnumeric Table 86.60. JSON formatter options Value Summary --noindent Whether to disable indenting the json Table 86.61. Table formatter options Value Summary --max-width <integer> Maximum display width, <1 to disable. you can also use the CLIFF_MAX_TERM_WIDTH environment variable, but the parameter takes precedence. --fit-width Fit the table to the display width. implied if --max- width greater than 0. Set the environment variable CLIFF_FIT_WIDTH=1 to always enable --print-empty Print empty table if there is no data to show. 86.17. volume qos set Set QoS specification properties Usage: Table 86.62. Positional arguments Value Summary <qos-spec> Qos specification to modify (name or id) Table 86.63. Command arguments Value Summary -h, --help Show this help message and exit --property <key=value> Property to add or modify for this qos specification (repeat option to set multiple properties) 86.18. volume qos show Display QoS specification details Usage: Table 86.64. Positional arguments Value Summary <qos-spec> Qos specification to display (name or id) Table 86.65. Command arguments Value Summary -h, --help Show this help message and exit Table 86.66. Output formatter options Value Summary -f {json,shell,table,value,yaml}, --format {json,shell,table,value,yaml} The output format, defaults to table -c COLUMN, --column COLUMN Specify the column(s) to include, can be repeated Table 86.67. JSON formatter options Value Summary --noindent Whether to disable indenting the json Table 86.68. Shell formatter options Value Summary --prefix PREFIX Add a prefix to all variable names Table 86.69. Table formatter options Value Summary --max-width <integer> Maximum display width, <1 to disable. you can also use the CLIFF_MAX_TERM_WIDTH environment variable, but the parameter takes precedence. --fit-width Fit the table to the display width. implied if --max- width greater than 0. Set the environment variable CLIFF_FIT_WIDTH=1 to always enable --print-empty Print empty table if there is no data to show. 86.19. volume qos unset Unset QoS specification properties Usage: Table 86.70. Positional arguments Value Summary <qos-spec> Qos specification to modify (name or id) Table 86.71. Command arguments Value Summary -h, --help Show this help message and exit --property <key> Property to remove from the qos specification. (repeat option to unset multiple properties) 86.20. volume service list List service command Usage: Table 86.72. 
Command arguments Value Summary -h, --help Show this help message and exit --host <host> List services on specified host (name only) --service <service> List only specified service (name only) --long List additional fields in output Table 86.73. Output formatter options Value Summary -f {csv,json,table,value,yaml}, --format {csv,json,table,value,yaml} The output format, defaults to table -c COLUMN, --column COLUMN Specify the column(s) to include, can be repeated --sort-column SORT_COLUMN Specify the column(s) to sort the data (columns specified first have a priority, non-existing columns are ignored), can be repeated Table 86.74. CSV formatter options Value Summary --quote {all,minimal,none,nonnumeric} When to include quotes, defaults to nonnumeric Table 86.75. JSON formatter options Value Summary --noindent Whether to disable indenting the json Table 86.76. Table formatter options Value Summary --max-width <integer> Maximum display width, <1 to disable. you can also use the CLIFF_MAX_TERM_WIDTH environment variable, but the parameter takes precedence. --fit-width Fit the table to the display width. implied if --max- width greater than 0. Set the environment variable CLIFF_FIT_WIDTH=1 to always enable --print-empty Print empty table if there is no data to show. 86.21. volume service set Set volume service properties Usage: Table 86.77. Positional arguments Value Summary <host> Name of host <service> Name of service (binary name) Table 86.78. Command arguments Value Summary -h, --help Show this help message and exit --enable Enable volume service --disable Disable volume service --disable-reason <reason> Reason for disabling the service (should be used with --disable option) 86.22. volume set Set volume properties Usage: Table 86.79. Positional arguments Value Summary <volume> Volume to modify (name or id) Table 86.80. Command arguments Value Summary -h, --help Show this help message and exit --name <name> New volume name --size <size> Extend volume size in gb --description <description> New volume description --no-property Remove all properties from <volume> (specify both --no-property and --property to remove the current properties before setting new properties.) --property <key=value> Set a property on this volume (repeat option to set multiple properties) --image-property <key=value> Set an image property on this volume (repeat option to set multiple image properties) --state <state> New volume state ("available", "error", "creating", "deleting", "in-use", "attaching", "detaching", "error_deleting" or "maintenance") (admin only) (This option simply changes the state of the volume in the database with no regard to actual status, exercise caution when using) --attached Set volume attachment status to "attached" (admin only) (This option simply changes the state of the volume in the database with no regard to actual status, exercise caution when using) --detached Set volume attachment status to "detached" (admin only) (This option simply changes the state of the volume in the database with no regard to actual status, exercise caution when using) --type <volume-type> New volume type (name or id) --retype-policy <retype-policy> Migration policy while re-typing volume ("never" or "on-demand", default is "never" ) (available only when --type option is specified) --bootable Mark volume as bootable --non-bootable Mark volume as non-bootable --read-only Set volume to read-only access mode --read-write Set volume to read-write access mode 86.23. 
volume show Display volume details Usage: Table 86.81. Positional arguments Value Summary <volume> Volume to display (name or id) Table 86.82. Command arguments Value Summary -h, --help Show this help message and exit Table 86.83. Output formatter options Value Summary -f {json,shell,table,value,yaml}, --format {json,shell,table,value,yaml} The output format, defaults to table -c COLUMN, --column COLUMN Specify the column(s) to include, can be repeated Table 86.84. JSON formatter options Value Summary --noindent Whether to disable indenting the json Table 86.85. Shell formatter options Value Summary --prefix PREFIX Add a prefix to all variable names Table 86.86. Table formatter options Value Summary --max-width <integer> Maximum display width, <1 to disable. you can also use the CLIFF_MAX_TERM_WIDTH environment variable, but the parameter takes precedence. --fit-width Fit the table to the display width. implied if --max- width greater than 0. Set the environment variable CLIFF_FIT_WIDTH=1 to always enable --print-empty Print empty table if there is no data to show. 86.24. volume snapshot create Create new volume snapshot Usage: Table 86.87. Positional arguments Value Summary <snapshot-name> Name of the new snapshot Table 86.88. Command arguments Value Summary -h, --help Show this help message and exit --volume <volume> Volume to snapshot (name or id) (default is <snapshot- name>) --description <description> Description of the snapshot --force Create a snapshot attached to an instance. default is False --property <key=value> Set a property to this snapshot (repeat option to set multiple properties) --remote-source <key=value> The attribute(s) of the exsiting remote volume snapshot (admin required) (repeat option to specify multiple attributes) e.g.: --remote-source source- name=test_name --remote-source source-id=test_id Table 86.89. Output formatter options Value Summary -f {json,shell,table,value,yaml}, --format {json,shell,table,value,yaml} The output format, defaults to table -c COLUMN, --column COLUMN Specify the column(s) to include, can be repeated Table 86.90. JSON formatter options Value Summary --noindent Whether to disable indenting the json Table 86.91. Shell formatter options Value Summary --prefix PREFIX Add a prefix to all variable names Table 86.92. Table formatter options Value Summary --max-width <integer> Maximum display width, <1 to disable. you can also use the CLIFF_MAX_TERM_WIDTH environment variable, but the parameter takes precedence. --fit-width Fit the table to the display width. implied if --max- width greater than 0. Set the environment variable CLIFF_FIT_WIDTH=1 to always enable --print-empty Print empty table if there is no data to show. 86.25. volume snapshot delete Delete volume snapshot(s) Usage: Table 86.93. Positional arguments Value Summary <snapshot> Snapshot(s) to delete (name or id) Table 86.94. Command arguments Value Summary -h, --help Show this help message and exit --force Attempt forced removal of snapshot(s), regardless of state (defaults to False) 86.26. volume snapshot list List volume snapshots Usage: Table 86.95. Command arguments Value Summary -h, --help Show this help message and exit --all-projects Include all projects (admin only) --project <project> Filter results by project (name or id) (admin only) --project-domain <project-domain> Domain the project belongs to (name or id). this can be used in case collisions between project names exist. 
--long List additional fields in output --marker <volume-snapshot> The last snapshot id of the page --limit <num-snapshots> Maximum number of snapshots to display --name <name> Filters results by a name. --status <status> Filters results by a status. ( available , error , creating , deleting or error-deleting ) --volume <volume> Filters results by a volume (name or id). Table 86.96. Output formatter options Value Summary -f {csv,json,table,value,yaml}, --format {csv,json,table,value,yaml} The output format, defaults to table -c COLUMN, --column COLUMN Specify the column(s) to include, can be repeated --sort-column SORT_COLUMN Specify the column(s) to sort the data (columns specified first have a priority, non-existing columns are ignored), can be repeated Table 86.97. CSV formatter options Value Summary --quote {all,minimal,none,nonnumeric} When to include quotes, defaults to nonnumeric Table 86.98. JSON formatter options Value Summary --noindent Whether to disable indenting the json Table 86.99. Table formatter options Value Summary --max-width <integer> Maximum display width, <1 to disable. you can also use the CLIFF_MAX_TERM_WIDTH environment variable, but the parameter takes precedence. --fit-width Fit the table to the display width. implied if --max- width greater than 0. Set the environment variable CLIFF_FIT_WIDTH=1 to always enable --print-empty Print empty table if there is no data to show. 86.27. volume snapshot set Set volume snapshot properties Usage: Table 86.100. Positional arguments Value Summary <snapshot> Snapshot to modify (name or id) Table 86.101. Command arguments Value Summary -h, --help Show this help message and exit --name <name> New snapshot name --description <description> New snapshot description --no-property Remove all properties from <snapshot> (specify both --no-property and --property to remove the current properties before setting new properties.) --property <key=value> Property to add/change for this snapshot (repeat option to set multiple properties) --state <state> New snapshot state. ("available", "error", "creating", "deleting", or "error_deleting") (admin only) (This option simply changes the state of the snapshot in the database with no regard to actual status, exercise caution when using) 86.28. volume snapshot show Display volume snapshot details Usage: Table 86.102. Positional arguments Value Summary <snapshot> Snapshot to display (name or id) Table 86.103. Command arguments Value Summary -h, --help Show this help message and exit Table 86.104. Output formatter options Value Summary -f {json,shell,table,value,yaml}, --format {json,shell,table,value,yaml} The output format, defaults to table -c COLUMN, --column COLUMN Specify the column(s) to include, can be repeated Table 86.105. JSON formatter options Value Summary --noindent Whether to disable indenting the json Table 86.106. Shell formatter options Value Summary --prefix PREFIX Add a prefix to all variable names Table 86.107. Table formatter options Value Summary --max-width <integer> Maximum display width, <1 to disable. you can also use the CLIFF_MAX_TERM_WIDTH environment variable, but the parameter takes precedence. --fit-width Fit the table to the display width. implied if --max- width greater than 0. Set the environment variable CLIFF_FIT_WIDTH=1 to always enable --print-empty Print empty table if there is no data to show. 86.29. volume snapshot unset Unset volume snapshot properties Usage: Table 86.108. 
Positional arguments Value Summary <snapshot> Snapshot to modify (name or id) Table 86.109. Command arguments Value Summary -h, --help Show this help message and exit --property <key> Property to remove from snapshot (repeat option to remove multiple properties) 86.30. volume transfer request accept Accept volume transfer request. Usage: Table 86.110. Positional arguments Value Summary <transfer-request-id> Volume transfer request to accept (id only) Table 86.111. Command arguments Value Summary -h, --help Show this help message and exit --auth-key <key> Volume transfer request authentication key Table 86.112. Output formatter options Value Summary -f {json,shell,table,value,yaml}, --format {json,shell,table,value,yaml} The output format, defaults to table -c COLUMN, --column COLUMN Specify the column(s) to include, can be repeated Table 86.113. JSON formatter options Value Summary --noindent Whether to disable indenting the json Table 86.114. Shell formatter options Value Summary --prefix PREFIX Add a prefix to all variable names Table 86.115. Table formatter options Value Summary --max-width <integer> Maximum display width, <1 to disable. you can also use the CLIFF_MAX_TERM_WIDTH environment variable, but the parameter takes precedence. --fit-width Fit the table to the display width. implied if --max- width greater than 0. Set the environment variable CLIFF_FIT_WIDTH=1 to always enable --print-empty Print empty table if there is no data to show. 86.31. volume transfer request create Create volume transfer request. Usage: Table 86.116. Positional arguments Value Summary <volume> Volume to transfer (name or id) Table 86.117. Command arguments Value Summary -h, --help Show this help message and exit --name <name> New transfer request name (default to none) Table 86.118. Output formatter options Value Summary -f {json,shell,table,value,yaml}, --format {json,shell,table,value,yaml} The output format, defaults to table -c COLUMN, --column COLUMN Specify the column(s) to include, can be repeated Table 86.119. JSON formatter options Value Summary --noindent Whether to disable indenting the json Table 86.120. Shell formatter options Value Summary --prefix PREFIX Add a prefix to all variable names Table 86.121. Table formatter options Value Summary --max-width <integer> Maximum display width, <1 to disable. you can also use the CLIFF_MAX_TERM_WIDTH environment variable, but the parameter takes precedence. --fit-width Fit the table to the display width. implied if --max- width greater than 0. Set the environment variable CLIFF_FIT_WIDTH=1 to always enable --print-empty Print empty table if there is no data to show. 86.32. volume transfer request delete Delete volume transfer request(s). Usage: Table 86.122. Positional arguments Value Summary <transfer-request> Volume transfer request(s) to delete (name or id) Table 86.123. Command arguments Value Summary -h, --help Show this help message and exit 86.33. volume transfer request list Lists all volume transfer requests. Usage: Table 86.124. Command arguments Value Summary -h, --help Show this help message and exit --all-projects Include all projects (admin only) Table 86.125. 
Output formatter options Value Summary -f {csv,json,table,value,yaml}, --format {csv,json,table,value,yaml} The output format, defaults to table -c COLUMN, --column COLUMN Specify the column(s) to include, can be repeated --sort-column SORT_COLUMN Specify the column(s) to sort the data (columns specified first have a priority, non-existing columns are ignored), can be repeated Table 86.126. CSV formatter options Value Summary --quote {all,minimal,none,nonnumeric} When to include quotes, defaults to nonnumeric Table 86.127. JSON formatter options Value Summary --noindent Whether to disable indenting the json Table 86.128. Table formatter options Value Summary --max-width <integer> Maximum display width, <1 to disable. you can also use the CLIFF_MAX_TERM_WIDTH environment variable, but the parameter takes precedence. --fit-width Fit the table to the display width. implied if --max- width greater than 0. Set the environment variable CLIFF_FIT_WIDTH=1 to always enable --print-empty Print empty table if there is no data to show. 86.34. volume transfer request show Show volume transfer request details. Usage: Table 86.129. Positional arguments Value Summary <transfer-request> Volume transfer request to display (name or id) Table 86.130. Command arguments Value Summary -h, --help Show this help message and exit Table 86.131. Output formatter options Value Summary -f {json,shell,table,value,yaml}, --format {json,shell,table,value,yaml} The output format, defaults to table -c COLUMN, --column COLUMN Specify the column(s) to include, can be repeated Table 86.132. JSON formatter options Value Summary --noindent Whether to disable indenting the json Table 86.133. Shell formatter options Value Summary --prefix PREFIX Add a prefix to all variable names Table 86.134. Table formatter options Value Summary --max-width <integer> Maximum display width, <1 to disable. you can also use the CLIFF_MAX_TERM_WIDTH environment variable, but the parameter takes precedence. --fit-width Fit the table to the display width. implied if --max- width greater than 0. Set the environment variable CLIFF_FIT_WIDTH=1 to always enable --print-empty Print empty table if there is no data to show. 86.35. volume type create Create new volume type Usage: Table 86.135. Positional arguments Value Summary <name> Volume type name Table 86.136. Command arguments Value Summary -h, --help Show this help message and exit --description <description> Volume type description --public Volume type is accessible to the public --private Volume type is not accessible to the public --property <key=value> Set a property on this volume type (repeat option to set multiple properties) --project <project> Allow <project> to access private type (name or id) (Must be used with --private option) --encryption-provider <provider> Set the encryption provider format for this volume type (e.g "luks" or "plain") (admin only) (This option is required when setting encryption type of a volume. 
Consider using other encryption options such as: "-- encryption-cipher", "--encryption-key-size" and "-- encryption-control-location") --encryption-cipher <cipher> Set the encryption algorithm or mode for this volume type (e.g "aes-xts-plain64") (admin only) --encryption-key-size <key-size> Set the size of the encryption key of this volume type (e.g "128" or "256") (admin only) --encryption-control-location <control-location> Set the notional service where the encryption is performed ("front-end" or "back-end") (admin only) (The default value for this option is "front-end" when setting encryption type of a volume. Consider using other encryption options such as: "--encryption- cipher", "--encryption-key-size" and "--encryption- provider") --project-domain <project-domain> Domain the project belongs to (name or id). this can be used in case collisions between project names exist. Table 86.137. Output formatter options Value Summary -f {json,shell,table,value,yaml}, --format {json,shell,table,value,yaml} The output format, defaults to table -c COLUMN, --column COLUMN Specify the column(s) to include, can be repeated Table 86.138. JSON formatter options Value Summary --noindent Whether to disable indenting the json Table 86.139. Shell formatter options Value Summary --prefix PREFIX Add a prefix to all variable names Table 86.140. Table formatter options Value Summary --max-width <integer> Maximum display width, <1 to disable. you can also use the CLIFF_MAX_TERM_WIDTH environment variable, but the parameter takes precedence. --fit-width Fit the table to the display width. implied if --max- width greater than 0. Set the environment variable CLIFF_FIT_WIDTH=1 to always enable --print-empty Print empty table if there is no data to show. 86.36. volume type delete Delete volume type(s) Usage: Table 86.141. Positional arguments Value Summary <volume-type> Volume type(s) to delete (name or id) Table 86.142. Command arguments Value Summary -h, --help Show this help message and exit 86.37. volume type list List volume types Usage: Table 86.143. Command arguments Value Summary -h, --help Show this help message and exit --long List additional fields in output --default List the default volume type --public List only public types --private List only private types (admin only) --encryption-type Display encryption information for each volume type (admin only) Table 86.144. Output formatter options Value Summary -f {csv,json,table,value,yaml}, --format {csv,json,table,value,yaml} The output format, defaults to table -c COLUMN, --column COLUMN Specify the column(s) to include, can be repeated --sort-column SORT_COLUMN Specify the column(s) to sort the data (columns specified first have a priority, non-existing columns are ignored), can be repeated Table 86.145. CSV formatter options Value Summary --quote {all,minimal,none,nonnumeric} When to include quotes, defaults to nonnumeric Table 86.146. JSON formatter options Value Summary --noindent Whether to disable indenting the json Table 86.147. Table formatter options Value Summary --max-width <integer> Maximum display width, <1 to disable. you can also use the CLIFF_MAX_TERM_WIDTH environment variable, but the parameter takes precedence. --fit-width Fit the table to the display width. implied if --max- width greater than 0. Set the environment variable CLIFF_FIT_WIDTH=1 to always enable --print-empty Print empty table if there is no data to show. 86.38. volume type set Set volume type properties Usage: Table 86.148. 
Positional arguments Value Summary <volume-type> Volume type to modify (name or id) Table 86.149. Command arguments Value Summary -h, --help Show this help message and exit --name <name> Set volume type name --description <description> Set volume type description --property <key=value> Set a property on this volume type (repeat option to set multiple properties) --project <project> Set volume type access to project (name or id) (admin only) --project-domain <project-domain> Domain the project belongs to (name or id). this can be used in case collisions between project names exist. --encryption-provider <provider> Set the encryption provider format for this volume type (e.g "luks" or "plain") (admin only) (This option is required when setting encryption type of a volume for the first time. Consider using other encryption options such as: "--encryption-cipher", "--encryption- key-size" and "--encryption-control-location") --encryption-cipher <cipher> Set the encryption algorithm or mode for this volume type (e.g "aes-xts-plain64") (admin only) --encryption-key-size <key-size> Set the size of the encryption key of this volume type (e.g "128" or "256") (admin only) --encryption-control-location <control-location> Set the notional service where the encryption is performed ("front-end" or "back-end") (admin only) (The default value for this option is "front-end" when setting encryption type of a volume for the first time. Consider using other encryption options such as: "--encryption-cipher", "--encryption-key-size" and "-- encryption-provider") 86.39. volume type show Display volume type details Usage: Table 86.150. Positional arguments Value Summary <volume-type> Volume type to display (name or id) Table 86.151. Command arguments Value Summary -h, --help Show this help message and exit --encryption-type Display encryption information of this volume type (admin only) Table 86.152. Output formatter options Value Summary -f {json,shell,table,value,yaml}, --format {json,shell,table,value,yaml} The output format, defaults to table -c COLUMN, --column COLUMN Specify the column(s) to include, can be repeated Table 86.153. JSON formatter options Value Summary --noindent Whether to disable indenting the json Table 86.154. Shell formatter options Value Summary --prefix PREFIX Add a prefix to all variable names Table 86.155. Table formatter options Value Summary --max-width <integer> Maximum display width, <1 to disable. you can also use the CLIFF_MAX_TERM_WIDTH environment variable, but the parameter takes precedence. --fit-width Fit the table to the display width. implied if --max- width greater than 0. Set the environment variable CLIFF_FIT_WIDTH=1 to always enable --print-empty Print empty table if there is no data to show. 86.40. volume type unset Unset volume type properties Usage: Table 86.156. Positional arguments Value Summary <volume-type> Volume type to modify (name or id) Table 86.157. Command arguments Value Summary -h, --help Show this help message and exit --property <key> Remove a property from this volume type (repeat option to remove multiple properties) --project <project> Removes volume type access to project (name or id) (admin only) --project-domain <project-domain> Domain the project belongs to (name or id). this can be used in case collisions between project names exist. --encryption-type Remove the encryption type for this volume type (admin only) 86.41. volume unset Unset volume properties Usage: Table 86.158. 
Positional arguments Value Summary <volume> Volume to modify (name or id) Table 86.159. Command arguments Value Summary -h, --help Show this help message and exit --property <key> Remove a property from volume (repeat option to remove multiple properties) --image-property <key> Remove an image property from volume (repeat option to remove multiple image properties)
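As a worked illustration of the volume type encryption options described above, the following sketch creates a private, LUKS-encrypted volume type and grants one project access to it. The type name encrypted-db and the project name payments are placeholder values invented for this example; the flags are the ones documented for volume type create, volume type set, and volume type show.

# Create a private volume type with LUKS encryption (admin only)
openstack volume type create \
    --private \
    --description "LUKS-encrypted type for database volumes" \
    --encryption-provider luks \
    --encryption-cipher aes-xts-plain64 \
    --encryption-key-size 256 \
    --encryption-control-location front-end \
    encrypted-db

# Allow a single project to use the private type (admin only)
openstack volume type set --project payments encrypted-db

# Verify the type, including its encryption settings
openstack volume type show --encryption-type encrypted-db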
[ "openstack volume backup create [-h] [-f {json,shell,table,value,yaml}] [-c COLUMN] [--noindent] [--prefix PREFIX] [--max-width <integer>] [--fit-width] [--print-empty] [--name <name>] [--description <description>] [--container <container>] [--snapshot <snapshot>] [--force] [--incremental] <volume>", "openstack volume backup delete [-h] [--force] <backup> [<backup> ...]", "openstack volume backup list [-h] [-f {csv,json,table,value,yaml}] [-c COLUMN] [--quote {all,minimal,none,nonnumeric}] [--noindent] [--max-width <integer>] [--fit-width] [--print-empty] [--sort-column SORT_COLUMN] [--long] [--name <name>] [--status <status>] [--volume <volume>] [--marker <volume-backup>] [--limit <num-backups>] [--all-projects]", "openstack volume backup restore [-h] [-f {json,shell,table,value,yaml}] [-c COLUMN] [--noindent] [--prefix PREFIX] [--max-width <integer>] [--fit-width] [--print-empty] <backup> <volume>", "openstack volume backup set [-h] [--name <name>] [--description <description>] [--state <state>] <backup>", "openstack volume backup show [-h] [-f {json,shell,table,value,yaml}] [-c COLUMN] [--noindent] [--prefix PREFIX] [--max-width <integer>] [--fit-width] [--print-empty] <backup>", "openstack volume create [-h] [-f {json,shell,table,value,yaml}] [-c COLUMN] [--noindent] [--prefix PREFIX] [--max-width <integer>] [--fit-width] [--print-empty] [--size <size>] [--type <volume-type>] [--image <image> | --snapshot <snapshot> | --source <volume>] [--description <description>] [--availability-zone <availability-zone>] [--consistency-group consistency-group>] [--property <key=value>] [--hint <key=value>] [--bootable | --non-bootable] [--read-only | --read-write] <name>", "openstack volume delete [-h] [--force | --purge] <volume> [<volume> ...]", "openstack volume host set [-h] [--disable | --enable] <host-name>", "openstack volume list [-h] [-f {csv,json,table,value,yaml}] [-c COLUMN] [--quote {all,minimal,none,nonnumeric}] [--noindent] [--max-width <integer>] [--fit-width] [--print-empty] [--sort-column SORT_COLUMN] [--project <project>] [--project-domain <project-domain>] [--user <user>] [--user-domain <user-domain>] [--name <name>] [--status <status>] [--all-projects] [--long] [--marker <volume>] [--limit <num-volumes>]", "openstack volume migrate [-h] --host <host> [--force-host-copy] [--lock-volume] <volume>", "openstack volume qos associate [-h] <qos-spec> <volume-type>", "openstack volume qos create [-h] [-f {json,shell,table,value,yaml}] [-c COLUMN] [--noindent] [--prefix PREFIX] [--max-width <integer>] [--fit-width] [--print-empty] [--consumer <consumer>] [--property <key=value>] <name>", "openstack volume qos delete [-h] [--force] <qos-spec> [<qos-spec> ...]", "openstack volume qos disassociate [-h] [--volume-type <volume-type> | --all] <qos-spec>", "openstack volume qos list [-h] [-f {csv,json,table,value,yaml}] [-c COLUMN] [--quote {all,minimal,none,nonnumeric}] [--noindent] [--max-width <integer>] [--fit-width] [--print-empty] [--sort-column SORT_COLUMN]", "openstack volume qos set [-h] [--property <key=value>] <qos-spec>", "openstack volume qos show [-h] [-f {json,shell,table,value,yaml}] [-c COLUMN] [--noindent] [--prefix PREFIX] [--max-width <integer>] [--fit-width] [--print-empty] <qos-spec>", "openstack volume qos unset [-h] [--property <key>] <qos-spec>", "openstack volume service list [-h] [-f {csv,json,table,value,yaml}] [-c COLUMN] [--quote {all,minimal,none,nonnumeric}] [--noindent] [--max-width <integer>] [--fit-width] [--print-empty] [--sort-column SORT_COLUMN] [--host 
<host>] [--service <service>] [--long]", "openstack volume service set [-h] [--enable | --disable] [--disable-reason <reason>] <host> <service>", "openstack volume set [-h] [--name <name>] [--size <size>] [--description <description>] [--no-property] [--property <key=value>] [--image-property <key=value>] [--state <state>] [--attached | --detached] [--type <volume-type>] [--retype-policy <retype-policy>] [--bootable | --non-bootable] [--read-only | --read-write] <volume>", "openstack volume show [-h] [-f {json,shell,table,value,yaml}] [-c COLUMN] [--noindent] [--prefix PREFIX] [--max-width <integer>] [--fit-width] [--print-empty] <volume>", "openstack volume snapshot create [-h] [-f {json,shell,table,value,yaml}] [-c COLUMN] [--noindent] [--prefix PREFIX] [--max-width <integer>] [--fit-width] [--print-empty] [--volume <volume>] [--description <description>] [--force] [--property <key=value>] [--remote-source <key=value>] <snapshot-name>", "openstack volume snapshot delete [-h] [--force] <snapshot> [<snapshot> ...]", "openstack volume snapshot list [-h] [-f {csv,json,table,value,yaml}] [-c COLUMN] [--quote {all,minimal,none,nonnumeric}] [--noindent] [--max-width <integer>] [--fit-width] [--print-empty] [--sort-column SORT_COLUMN] [--all-projects] [--project <project>] [--project-domain <project-domain>] [--long] [--marker <volume-snapshot>] [--limit <num-snapshots>] [--name <name>] [--status <status>] [--volume <volume>]", "openstack volume snapshot set [-h] [--name <name>] [--description <description>] [--no-property] [--property <key=value>] [--state <state>] <snapshot>", "openstack volume snapshot show [-h] [-f {json,shell,table,value,yaml}] [-c COLUMN] [--noindent] [--prefix PREFIX] [--max-width <integer>] [--fit-width] [--print-empty] <snapshot>", "openstack volume snapshot unset [-h] [--property <key>] <snapshot>", "openstack volume transfer request accept [-h] [-f {json,shell,table,value,yaml}] [-c COLUMN] [--noindent] [--prefix PREFIX] [--max-width <integer>] [--fit-width] [--print-empty] --auth-key <key> <transfer-request-id>", "openstack volume transfer request create [-h] [-f {json,shell,table,value,yaml}] [-c COLUMN] [--noindent] [--prefix PREFIX] [--max-width <integer>] [--fit-width] [--print-empty] [--name <name>] <volume>", "openstack volume transfer request delete [-h] <transfer-request> [<transfer-request> ...]", "openstack volume transfer request list [-h] [-f {csv,json,table,value,yaml}] [-c COLUMN] [--quote {all,minimal,none,nonnumeric}] [--noindent] [--max-width <integer>] [--fit-width] [--print-empty] [--sort-column SORT_COLUMN] [--all-projects]", "openstack volume transfer request show [-h] [-f {json,shell,table,value,yaml}] [-c COLUMN] [--noindent] [--prefix PREFIX] [--max-width <integer>] [--fit-width] [--print-empty] <transfer-request>", "openstack volume type create [-h] [-f {json,shell,table,value,yaml}] [-c COLUMN] [--noindent] [--prefix PREFIX] [--max-width <integer>] [--fit-width] [--print-empty] [--description <description>] [--public | --private] [--property <key=value>] [--project <project>] [--encryption-provider <provider>] [--encryption-cipher <cipher>] [--encryption-key-size <key-size>] [--encryption-control-location <control-location>] [--project-domain <project-domain>] <name>", "openstack volume type delete [-h] <volume-type> [<volume-type> ...]", "openstack volume type list [-h] [-f {csv,json,table,value,yaml}] [-c COLUMN] [--quote {all,minimal,none,nonnumeric}] [--noindent] [--max-width <integer>] [--fit-width] [--print-empty] [--sort-column 
SORT_COLUMN] [--long] [--default | --public | --private] [--encryption-type]", "openstack volume type set [-h] [--name <name>] [--description <description>] [--property <key=value>] [--project <project>] [--project-domain <project-domain>] [--encryption-provider <provider>] [--encryption-cipher <cipher>] [--encryption-key-size <key-size>] [--encryption-control-location <control-location>] <volume-type>", "openstack volume type show [-h] [-f {json,shell,table,value,yaml}] [-c COLUMN] [--noindent] [--prefix PREFIX] [--max-width <integer>] [--fit-width] [--print-empty] [--encryption-type] <volume-type>", "openstack volume type unset [-h] [--property <key>] [--project <project>] [--project-domain <project-domain>] [--encryption-type] <volume-type>", "openstack volume unset [-h] [--property <key>] [--image-property <key>] <volume>" ]
https://docs.redhat.com/en/documentation/red_hat_openstack_platform/16.2/html/command_line_interface_reference/volume
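The volume transfer request commands listed above are normally used together as one workflow to move a volume between projects. A minimal sketch, assuming a volume named shared-data and leaving the generated transfer ID and authorization key as placeholders, looks like this:

# Current owner: create the transfer request and note the returned ID and auth key
openstack volume transfer request create --name move-shared-data shared-data

# Either side: inspect a pending request
openstack volume transfer request show <transfer-request-id>

# New owner: accept the transfer with the ID and auth key from the create step
openstack volume transfer request accept --auth-key <key> <transfer-request-id>

# Current owner: delete an unaccepted request instead, if the transfer is no longer wanted
openstack volume transfer request delete <transfer-request-id>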
Chapter 47. Configuring IBM WebSphere Application Server for KIE Server
Chapter 47. Configuring IBM WebSphere Application Server for KIE Server Before you deploy KIE Server with IBM WebSphere Application Server, you must configure system properties, security settings, JMS requirements, and other properties on IBM WebSphere. These configurations promote an optimal integration with KIE Server. Prerequisites IBM WebSphere Application Server is installed and running. You are logged in to the WebSphere Integrated Solutions Console. 47.1. Enabling administrative security You must enable administrative security in the WebSphere Integrated Solutions Console so that you have the required permissions to create users and groups. Procedure In the WebSphere Integrated Solutions Console, click Security Global Security and ensure that the option Enable Application Security is selected. This may already be selected and overridden at the server level. Click Security Configuration Wizard and click . Select the repository that contains the user information. For example, select Federated repositories for local configurations. Click Enter the Primary administrative user name and Password . Click and then click Finish . Click Save in the Messages window to save your changes to the primary configuration. Figure 47.1. Save security changes In your command terminal, navigate to the IBM WebSphere Application Server /bin directory location that you specified during installation, and run the following commands to stop and restart IBM WebSphere to apply the security changes: Replace <SERVER_NAME> with the IBM WebSphere Application Server name defined in Servers Server Types IBM WebSphere Application Servers of the WebSphere Integrated Solutions Console. 47.2. Configuring JDBC data sources in IBM WebSphere Application Server A data source is an object that enables a Java Database Connectivity (JDBC) client, such as an application server, to establish a connection with a database. Applications look up the data source on the Java Naming and Directory Interface (JNDI) tree or in the local application context and request a database connection to retrieve data. You must configure data sources for IBM WebSphere Application Server to ensure proper data exchange between the servers and the designated database. Typically, solutions using Red Hat Process Automation Manager manage several resources within a single transaction. JMS for asynchronous jobs, events, and timers, for example. Red Hat Process Automation Manager requires an XA driver in the datasource when possible to ensure data atomicity and consistent results. If transactional code for different schemas exists inside listeners or derives from hooks provided by the jBPM engine, an XA driver is also required. Do not use non-XA datasources unless you are positive you do not have multiple resources participating in single transactions. Prerequisites The JDBC providers that you want to use to create database connections are configured on all servers on which you want to deploy the data source. For more information about JDBC providers, see Configuring a JDBC provider in the IBM Knowledge Center. Procedure Navigate to the Software Downloads page in the Red Hat Customer Portal (login required), and select the product and version from the drop-down options: Product: Process Automation Manager Version: 7.13.5 Download Red Hat Process Automation Manager 7.13.5 Add-Ons . Complete the following steps to prepare your database: Extract rhpam-7.13.5-add-ons.zip in a temporary directory, for example TEMP_DIR . 
Extract TEMP_DIR/rhpam-7.13.5-migration-tool.zip . Change your current directory to the TEMP_DIR/rhpam-7.13.5-migration-tool/ddl-scripts directory. This directory contains DDL scripts for several database types. Import the DDL script for your database type into the database that you want to use, for example: psql jbpm < /ddl-scripts/postgresql/postgresql-jbpm-schema.sql Note If you are using PostgreSQL or Oracle in conjunction with Spring Boot, you must import the respective Spring Boot DDL script, for example /ddl-scripts/oracle/oracle-springboot-jbpm-schema.sql or /ddl-scripts/postgresql/postgresql-springboot-jbpm-schema.sql . In the WebSphere Integrated Solutions Console, navigate to Resources JDBC Data sources . Select the scope at which applications can use the data source. You can choose a cell, node, cluster, or server. Click New to open the Create a data source wizard. Enter a unique name in the Data source name field and a Java Naming and Directory Interface (JNDI) name in the JNDI name field. The application server uses the JNDI name to bind resource references for an application to this data source. Do not assign duplicate JNDI names across different resource types, such as data sources versus J2C connection factories or JMS connection factories. Do not assign duplicate JNDI names for multiple resources of the same type in the same scope. Click and choose Select an existing JDBC provider if the provider has been created, or choose Create new JDBC provider to define details for a new provider. (Creating the JDBC provider before this point is recommended, as a prerequisite to adding data sources.) On the Enter database specific properties for the data source panel, click Use this data source in container managed persistence (CMP) if container managed persistence (CMP) enterprise beans must access this data source. Then fill in any other database-specific properties. Optional: Configure the security aliases for the data source. You can select None for any of the authentication methods, or choose one of the following types: Component-managed authentication alias: Specifies an authentication alias to use when the component resource reference res-auth value is Application . To define a new alias, navigate to Related Items J2EE Connector Architecture (J2C) authentication data entries . A component-managed alias represents a combination of ID and password that is specified in an application for data source authentication. Therefore, the alias that you set on the data source must be identical to the alias in the application code. Mapping-configuration alias: Used only in the absence of a login configuration on the component resource reference. The specification of a login configuration and the associated properties on the component resource reference is the preferred way to define the authentication strategy when the res-auth value is set to Container . If you specify the DefaultPrincipalMapping login configuration, the associated property is a JAAS - J2C authentication data entry alias. Container-managed authentication alias: Used only in the absence of a login configuration on the component resource reference. The specification of a login configuration and the associated properties on the component resource reference determines the container-managed authentication strategy when the res-auth value is set to Container . Click , review the information for the data source, and click Finish to save the configuration and exit the wizard. 
The Data sources panel displays your new configuration in a table along with any other data sources that are configured for the same scope. For more information about IBM WebSphere Application Server data sources, see Configuring a JDBC provider and data source in the IBM Knowledge Center. 47.3. Configuring Java Message Service (JMS) The Java Message Service (JMS) is a Java API that KIE Server uses to exchange messages with other application servers such as Oracle WebLogic Server and IBM WebSphere Application Server. You must configure your application server to send and receive JMS messages through KIE Server to ensure collaboration between the two servers. 47.3.1. Create a service bus and add IBM WebSphere Application Server You must create a service bus and add the IBM WebSphere Application Server as a member in order to use JMS. Procedure In the WebSphere Integrated Solutions Console, navigate to Service Integration Buses New . Enter a new bus name and clear the Bus Security option. Click and then Finish to create the service bus. Select the service bus that you created. Under Topology , select Bus Members Add . In the Add a New Bus Member wizard, choose the IBM WebSphere Application Server and the type of message store for persistence. You can also specify the properties of the message store. Click Finish to add the new bus member. 47.3.2. Create JMS connection factories To enable messaging with KIE Server, you must create certain JMS connection factories for sending and receiving messages. Prerequisites You have created a service bus for IBM WebSphere Application Server. Procedure In the WebSphere Integrated Solutions Console, navigate to Resources JMS Connection Factories . Select the correct scope and click New . Select the Default Messaging Provider option and click OK . For each of the following required connection factories, enter the name of the connection factory (for example, KIE.SERVER.REQUEST ) and the JNDI name (for example, jms/cf/KIE.SERVER.REQUEST ), and then select the service bus from the Bus Name drop-down list. Leave the default values for the remaining options. Click Apply and then click Save to save the changes to the primary configuration, and repeat for each required factory. 47.3.2.1. JMS connection factories for KIE Server The following table lists the required Java Message Service (JMS) connection factories that enable JMS messaging with KIE Server: Table 47.1. Required JMS connection factories for KIE Server Name Default value Used for KIE.SERVER.REQUEST jms/cf/KIE.SERVER.REQUEST Sending all requests to KIE Server KIE.SERVER.RESPONSE jms/cf/KIE.SERVER.RESPONSE Receiving all responses produced by KIE Server KIE.SERVER.EXECUTOR jms/cf/KIE.SERVER.EXECUTOR KIE Server executor services 47.3.3. Create JMS queues JMS queues are the destination end points for point-to-point messaging. You must create certain JMS queues to enable JMS messaging with KIE Server. Prerequisites You have created a service bus for IBM WebSphere Application Server. Procedure In the WebSphere Integrated Solutions Console, navigate to Resources JMS Queues . Select the correct scope and click New . Select the Default Messaging Provider option and click OK . For each of the following required queues, enter the name of the queue (for example, KIE.SERVER.REQUEST ) and the JNDI name (for example, jms/KIE.SERVER.REQUEST ), and then select the service bus from the Bus Name drop-down list. 
From the Queue Name drop-down list, select the Create Service Integration Bus Destination , enter a unique identifier, and select the bus member that you created previously. Click Apply and then click Save to save the changes to the primary configuration, and repeat for each required queue. 47.3.3.1. JMS queues for KIE Server The following table lists the required Java Message Service (JMS) queues that enable JMS messaging with KIE Server: Table 47.2. Required JMS queues for KIE Server Name Default value Used for KIE.SERVER.REQUEST jms/KIE.SERVER.REQUEST Sending all requests to KIE Server KIE.SERVER.RESPONSE jms/KIE.SERVER.RESPONSE Receiving all responses produced by KIE Server KIE.SERVER.EXECUTOR jms/KIE.SERVER.EXECUTOR KIE Server executor services 47.3.4. Create JMS activation specifications A JMS activation specification is required in order to bridge the queue and the message-driven bean that enables JMS. Prerequisites You have created a service bus for IBM WebSphere Application Server. You have created JMS queues. Procedure In the WebSphere Integrated Solutions Console, navigate to Resources JMS Activation Specifications . Select the correct scope and click New . Select the Default Messaging Provider option and click OK . For each of the following required activation specifications, enter the name of the activation specification (for example, KIE.SERVER.REQUEST ) and the JNDI name (for example, jms/activation/KIE.SERVER.REQUEST ), and then select the service bus from the Bus Name drop-down list. From the Destination Type drop-down list, select Queue and enter the name of the corresponding queue as a Destination lookup (for example, jms/KIE.SERVER.REQUEST ). Click Apply and then click Save to save the changes to the primary configuration, and repeat for each required activation specification. 47.3.4.1. JMS activation specifications for KIE Server The following table lists the required Java Message Service (JMS) activation specifications that enable JMS messaging with KIE Server: Table 47.3. Required JMS activation specifications for KIE Server Name Default value Used for KIE.SERVER.REQUEST jms/activation/KIE.SERVER.REQUEST Sending all requests to KIE Server KIE.SERVER.RESPONSE jms/activation/KIE.SERVER.RESPONSE Receiving all responses produced by KIE Server KIE.SERVER.EXECUTOR jms/activation/KIE.SERVER.EXECUTOR KIE Server executor services 47.4. Setting system properties in IBM WebSphere Application Server Set the system properties listed in this section on your IBM WebSphere Application Server before you deploy KIE Server. Procedure In the WebSphere Integrated Solutions Console, navigate to Servers Server Types IBM WebSphere Application Servers . In the list of application servers, choose the server on which you are going to deploy KIE Server. Under the Server Infrastructure , click Java and Process Management Process Definition . Figure 47.2. WebSphere configuration page Under Additional Properties , click Java Virtual Machine . Figure 47.3. Process definition configuration page This opens the configuration properties for the JVM that is used to start IBM WebSphere. Set both the Initial heap size and Maximum heap size to 2048 and click Apply to increase the Java Virtual Machine (JVM) memory size. KIE Server has been tested with these values. If you do not increase the JVM memory size, IBM WebSphere Application Server freezes or causes deployment errors when deploying KIE Server. Under Additional Properties , click Custom Properties . 
Click New Custom JVM Properties and add the following properties to IBM WebSphere: Table 47.4. System properties for KIE Server Name Value Description kie.server.jms.queues.response jms/KIE.SERVER.RESPONSE The JNDI name of JMS queue for responses used by KIE Server. org.kie.server.domain WSLogin JAAS LoginContext domain used to authenticate users when using JMS. org.kie.server.persistence.ds jdbc/jbpm Data source JNDI name for KIE Server. org.kie.server.persistence.tm org.hibernate.engine.transaction.jta.platform.internal.WebSphereJtaPlatform Transaction manager platform for setting Hibernate properties. org.kie.server.persistence.dialect Example: org.hibernate.dialect.H2Dialect Specifies the Hibernate dialect to be used. Set according to data source. org.kie.executor.jms.queue jms/KIE.SERVER.EXECUTOR Job executor JMS queue for KIE Server. org.kie.executor.jms.cf jms/cf/KIE.SERVER.EXECUTOR Job executor JMS connection factory for KIE Server. org.kie.server.router Example: http://localhost:9000 (Optional) Specifies one or more URLs for one or more KIE Server routers (Smart Routers) that the application server is a part of in a clustered KIE Server environment. org.jboss.logging.provider jdk This property is only required where a CA SiteMinder TAI (SMTAI) is installed in the environment. Using this property forces Hibernate to use JDK instead of log4j for logging within Dashbuilder. CA SiteMinder TAI (SMTAI) contains an old version of log4j , which causes conflicts. Click Save to save the changes to the primary configuration. 47.5. Stopping and restarting IBM WebSphere Application Server After you have configured all required system properties in IBM WebSphere Application Server, stop and restart the IBM server to ensure that the configurations are applied. Procedure In your command terminal, navigate to the IBM WebSphere Application Server /bin directory location that you specified during installation, and run the following commands to stop and restart IBM WebSphere to apply the configuration changes: Replace <SERVER_NAME> with the IBM WebSphere Application Server name defined in Servers Server Types IBM WebSphere Application Servers of the WebSphere Integrated Solutions Console.
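The database preparation steps in Section 47.2 can also be scripted. The following is a minimal sketch, not part of the official procedure, that assumes rhpam-7.13.5-add-ons.zip has already been downloaded to the current directory, that the archive layout matches the paths documented above, and that the current user can connect to a PostgreSQL database named jbpm with psql.

# Unpack the add-ons archive and the migration tool that it contains
TEMP_DIR=$(mktemp -d)
unzip rhpam-7.13.5-add-ons.zip -d "$TEMP_DIR"
unzip "$TEMP_DIR/rhpam-7.13.5-migration-tool.zip" -d "$TEMP_DIR"

# Import the PostgreSQL DDL script into the jbpm database
cd "$TEMP_DIR/rhpam-7.13.5-migration-tool/ddl-scripts"
psql jbpm < postgresql/postgresql-jbpm-schema.sql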
[ "sudo ./stopServer.sh <SERVER_NAME>", "sudo ./startServer.sh <SERVER_NAME>", "psql jbpm < /ddl-scripts/postgresql/postgresql-jbpm-schema.sql", "sudo ./stopServer.sh <SERVER_NAME>", "sudo ./startServer.sh <SERVER_NAME>" ]
https://docs.redhat.com/en/documentation/red_hat_process_automation_manager/7.13/html/installing_and_configuring_red_hat_process_automation_manager/was-configure-proc
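If you script the JVM settings instead of entering each entry on the Custom Properties panel, for example through the Generic JVM arguments field on the same Java Virtual Machine panel, the properties in Table 47.4 translate into the -D arguments sketched below. This is an illustration only: jdbc/jbpm, the queue JNDI names, and the PostgreSQL dialect shown here must match the data source, JMS resources, and database that you actually configured.

-Dkie.server.jms.queues.response=jms/KIE.SERVER.RESPONSE
-Dorg.kie.server.domain=WSLogin
-Dorg.kie.server.persistence.ds=jdbc/jbpm
-Dorg.kie.server.persistence.tm=org.hibernate.engine.transaction.jta.platform.internal.WebSphereJtaPlatform
-Dorg.kie.server.persistence.dialect=org.hibernate.dialect.PostgreSQLDialect
-Dorg.kie.executor.jms.queue=jms/KIE.SERVER.EXECUTOR
-Dorg.kie.executor.jms.cf=jms/cf/KIE.SERVER.EXECUTOR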
Preface
Preface Open Java Development Kit (OpenJDK) is a free and open-source implementation of the Java Platform, Standard Edition (Java SE). Eclipse Temurin is available in four LTS versions: OpenJDK 8u, OpenJDK 11u, OpenJDK 17u, and OpenJDK 21u. Binary files for Eclipse Temurin are available for macOS, Microsoft Windows, and multiple Linux x86 operating systems, including Red Hat Enterprise Linux and Ubuntu.
null
https://docs.redhat.com/en/documentation/red_hat_build_of_openjdk/8/html/eclipse_temurin_8.0.422_release_notes/pr01
Chapter 14. Uninstalling Logging
Chapter 14. Uninstalling Logging You can remove logging from your OpenShift Container Platform cluster by removing installed Operators and related custom resources (CRs). 14.1. Uninstalling the logging You can stop aggregating logs by deleting the Red Hat OpenShift Logging Operator and the ClusterLogging custom resource (CR). Prerequisites You have administrator permissions. You have access to the Administrator perspective of the OpenShift Container Platform web console. Procedure Go to the Administration Custom Resource Definitions page, and click ClusterLogging . On the Custom Resource Definition Details page, click Instances . Click the options menu to the instance, and click Delete ClusterLogging . Go to the Administration Custom Resource Definitions page. Click the options menu to ClusterLogging , and select Delete Custom Resource Definition . Warning Deleting the ClusterLogging CR does not remove the persistent volume claims (PVCs). To delete the remaining PVCs, persistent volumes (PVs), and associated data, you must take further action. Releasing or deleting PVCs can delete PVs and cause data loss. If you have created a ClusterLogForwarder CR, click the options menu to ClusterLogForwarder , and then click Delete Custom Resource Definition . Go to the Operators Installed Operators page. Click the options menu to the Red Hat OpenShift Logging Operator, and then click Uninstall Operator . Optional: Delete the openshift-logging project. Warning Deleting the openshift-logging project deletes everything in that namespace, including any persistent volume claims (PVCs). If you want to preserve logging data, do not delete the openshift-logging project. Go to the Home Projects page. Click the options menu to the openshift-logging project, and then click Delete Project . Confirm the deletion by typing openshift-logging in the dialog box, and then click Delete . 14.2. Deleting logging PVCs To keep persistent volume claims (PVCs) for reuse with other pods, keep the labels or PVC names that you need to reclaim the PVCs. If you do not want to keep the PVCs, you can delete them. If you want to recover storage space, you can also delete the persistent volumes (PVs). Prerequisites You have administrator permissions. You have access to the Administrator perspective of the OpenShift Container Platform web console. Procedure Go to the Storage Persistent Volume Claims page. Click the options menu to each PVC, and select Delete Persistent Volume Claim . 14.3. Uninstalling Loki Prerequisites You have administrator permissions. You have access to the Administrator perspective of the OpenShift Container Platform web console. If you have not already removed the Red Hat OpenShift Logging Operator and related resources, you have removed references to LokiStack from the ClusterLogging custom resource. Procedure Go to the Administration Custom Resource Definitions page, and click LokiStack . On the Custom Resource Definition Details page, click Instances . Click the options menu to the instance, and then click Delete LokiStack . Go to the Administration Custom Resource Definitions page. Click the options menu to LokiStack , and select Delete Custom Resource Definition . Delete the object storage secret. Go to the Operators Installed Operators page. Click the options menu to the Loki Operator, and then click Uninstall Operator . Optional: Delete the openshift-operators-redhat project. Important Do not delete the openshift-operators-redhat project if other global Operators are installed in this namespace. 
Go to the Home Projects page. Click the options menu to the openshift-operators-redhat project, and then click Delete Project . Confirm the deletion by typing openshift-operators-redhat in the dialog box, and then click Delete . 14.4. Uninstalling Elasticsearch Prerequisites You have administrator permissions. You have access to the Administrator perspective of the OpenShift Container Platform web console. If you have not already removed the Red Hat OpenShift Logging Operator and related resources, you must remove references to Elasticsearch from the ClusterLogging custom resource. Procedure Go to the Administration Custom Resource Definitions page, and click Elasticsearch . On the Custom Resource Definition Details page, click Instances . Click the options menu to the instance, and then click Delete Elasticsearch . Go to the Administration Custom Resource Definitions page. Click the options menu to Elasticsearch , and select Delete Custom Resource Definition . Delete the object storage secret. Go to the Operators Installed Operators page. Click the options menu to the OpenShift Elasticsearch Operator, and then click Uninstall Operator . Optional: Delete the openshift-operators-redhat project. Important Do not delete the openshift-operators-redhat project if other global Operators are installed in this namespace. Go to the Home Projects page. Click the options menu to the openshift-operators-redhat project, and then click Delete Project . Confirm the deletion by typing openshift-operators-redhat in the dialog box, and then click Delete . 14.5. Deleting Operators from a cluster using the CLI Cluster administrators can delete installed Operators from a selected namespace by using the CLI. Prerequisites You have access to an OpenShift Container Platform cluster using an account with cluster-admin permissions. The OpenShift CLI ( oc ) is installed on your workstation. Procedure Ensure the latest version of the subscribed operator (for example, serverless-operator ) is identified in the currentCSV field. USD oc get subscription.operators.coreos.com serverless-operator -n openshift-serverless -o yaml | grep currentCSV Example output currentCSV: serverless-operator.v1.28.0 Delete the subscription (for example, serverless-operator ): USD oc delete subscription.operators.coreos.com serverless-operator -n openshift-serverless Example output subscription.operators.coreos.com "serverless-operator" deleted Delete the CSV for the Operator in the target namespace using the currentCSV value from the step: USD oc delete clusterserviceversion serverless-operator.v1.28.0 -n openshift-serverless Example output clusterserviceversion.operators.coreos.com "serverless-operator.v1.28.0" deleted Additional resources Reclaiming a persistent volume manually
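The web console steps in Sections 14.1 and 14.5 can also be performed entirely with the oc CLI. The sketch below assumes the default ClusterLogging instance name instance and the openshift-logging namespace; confirm the real names in your cluster first, and remember that deleting PVCs permanently removes the stored log data.

# Remove the ClusterLogging instance and its custom resource definition
oc -n openshift-logging delete clusterlogging instance
oc delete crd clusterloggings.logging.openshift.io

# Find the Red Hat OpenShift Logging Operator subscription and its installed CSV, then delete both
oc -n openshift-logging get subscription
oc -n openshift-logging get subscription <subscription-name> -o yaml | grep currentCSV
oc -n openshift-logging delete subscription <subscription-name>
oc -n openshift-logging delete clusterserviceversion <currentCSV-value>

# Optional: remove leftover PVCs (this deletes the retained log data)
oc -n openshift-logging get pvc
oc -n openshift-logging delete pvc <pvc-name>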
[ "oc get subscription.operators.coreos.com serverless-operator -n openshift-serverless -o yaml | grep currentCSV", "currentCSV: serverless-operator.v1.28.0", "oc delete subscription.operators.coreos.com serverless-operator -n openshift-serverless", "subscription.operators.coreos.com \"serverless-operator\" deleted", "oc delete clusterserviceversion serverless-operator.v1.28.0 -n openshift-serverless", "clusterserviceversion.operators.coreos.com \"serverless-operator.v1.28.0\" deleted" ]
https://docs.redhat.com/en/documentation/openshift_container_platform/4.13/html/logging/cluster-logging-uninstall
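The same pattern applies when removing the Loki components from the CLI instead of the web console. In this sketch, the LokiStack instance name logging-loki is a commonly used default rather than a guarantee, so list the resources before deleting them.

# Remove the LokiStack instance and its custom resource definition
oc -n openshift-logging get lokistack
oc -n openshift-logging delete lokistack logging-loki
oc get crd | grep -i lokistack
oc delete crd <lokistack-crd-name>

# Delete the object storage secret, then remove the Loki Operator subscription and CSV
oc -n openshift-logging delete secret <object-storage-secret>
oc -n openshift-operators-redhat get subscription
oc -n openshift-operators-redhat delete subscription <loki-operator-subscription>
oc -n openshift-operators-redhat delete clusterserviceversion <loki-operator-csv>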
Administration guide for Red Hat Developer Hub
Administration guide for Red Hat Developer Hub Red Hat Developer Hub 1.2 Red Hat Customer Content Services
[ "kind: ConfigMap apiVersion: v1 metadata: name: app-config-rhdh data: app-config-rhdh.yaml: | app: title: {product}", "... other Red Hat Developer Hub Helm Chart configurations upstream: backstage: extraAppConfig: - configMapRef: app-config-rhdh filename: app-config-rhdh.yaml ... other Red Hat Developer Hub Helm Chart configurations", "kind: Backstage apiVersion: rhdh.redhat.com/v1alpha1 metadata: name: app-config-rhdh data: \"app-config-rhdh.yaml\": | app: title: Red Hat Developer Hub baseUrl: <RHDH_URL> 1 backend: auth: keys: - secret: \"USD{BACKEND_SECRET}\" 2 baseUrl: <RHDH_URL> 3 cors: origin: <RHDH_URL> 4", "node -p 'require(\"crypto\").randomBytes(24).toString(\"base64\")'", "apiVersion: rhdh.redhat.com/v1alpha1 kind: Backstage metadata: name: developer-hub spec: application: appConfig: mountPath: /opt/app-root/src configMaps: - name: app-config-rhdh extraEnvs: secrets: - name: secrets-rhdh extraFiles: mountPath: /opt/app-root/src replicas: 1 route: enabled: true database: enableLocalDb: true", "cat <<EOF | oc -n <your-namespace> create -f - apiVersion: v1 kind: Secret metadata: name: <crt-secret> 1 type: Opaque stringData: postgres-ca.pem: |- -----BEGIN CERTIFICATE----- <ca-certificate-key> 2 postgres-key.key: |- -----BEGIN CERTIFICATE----- <tls-private-key> 3 postgres-crt.pem: |- -----BEGIN CERTIFICATE----- <tls-certificate-key> 4 # EOF", "cat <<EOF | oc -n <your-namespace> create -f - apiVersion: v1 kind: Secret metadata: name: <cred-secret> 1 type: Opaque stringData: 2 POSTGRES_PASSWORD: <password> POSTGRES_PORT: \"<db-port>\" POSTGRES_USER: <username> POSTGRES_HOST: <db-host> PGSSLMODE: <ssl-mode> # for TLS connection 3 NODE_EXTRA_CA_CERTS: <abs-path-to-pem-file> # for TLS connection, e.g. /opt/app-root/src/postgres-crt.pem 4 EOF", "cat <<EOF | oc -n <your-namespace> create -f - apiVersion: rhdh.redhat.com/v1alpha1 kind: Backstage metadata: name: <backstage-instance-name> spec: database: enableLocalDb: false 1 application: extraFiles: mountPath: <path> # e g /opt/app-root/src secrets: - name: <crt-secret> 2 key: postgres-crt.pem, postgres-ca.pem, postgres-key.key # key name as in <crt-secret> Secret extraEnvs: secrets: - name: <cred-secret> 3 #", "cat <<EOF | oc -n <your-namespace> create -f - apiVersion: v1 kind: Secret metadata: name: <crt-secret> 1 type: Opaque stringData: postgres-ca.pem: |- -----BEGIN CERTIFICATE----- <ca-certificate-key> 2 postgres-key.key: |- -----BEGIN CERTIFICATE----- <tls-private-key> 3 postgres-crt.pem: |- -----BEGIN CERTIFICATE----- <tls-certificate-key> 4 # EOF", "cat <<EOF | oc -n <your-namespace> create -f - apiVersion: v1 kind: Secret metadata: name: <cred-secret> 1 type: Opaque stringData: 2 POSTGRES_PASSWORD: <password> POSTGRES_PORT: \"<db-port>\" POSTGRES_USER: <username> POSTGRES_HOST: <db-host> PGSSLMODE: <ssl-mode> # for TLS connection 3 NODE_EXTRA_CA_CERTS: <abs-path-to-pem-file> # for TLS connection, e.g. 
/opt/app-root/src/postgres-crt.pem 4 EOF", "upstream: postgresql: enabled: false # disable PostgreSQL instance creation 1 auth: existingSecret: <cred-secret> # inject credentials secret to Backstage 2 backstage: appConfig: backend: database: connection: # configure Backstage DB connection parameters host: USD{POSTGRES_HOST} port: USD{POSTGRES_PORT} user: USD{POSTGRES_USER} password: USD{POSTGRES_PASSWORD} ssl: rejectUnauthorized: true, ca: USDfile: /opt/app-root/src/postgres-ca.pem key: USDfile: /opt/app-root/src/postgres-key.key cert: USDfile: /opt/app-root/src/postgres-crt.pem extraEnvVarsSecrets: - <cred-secret> # inject credentials secret to Backstage 3 extraEnvVars: - name: BACKEND_SECRET valueFrom: secretKeyRef: key: backend-secret name: '{{ include \"janus-idp.backend-secret-name\" USD }}' extraVolumeMounts: - mountPath: /opt/app-root/src/dynamic-plugins-root name: dynamic-plugins-root - mountPath: /opt/app-root/src/postgres-crt.pem name: postgres-crt # inject TLS certificate to Backstage cont. 4 subPath: postgres-crt.pem - mountPath: /opt/app-root/src/postgres-ca.pem name: postgres-ca # inject CA certificate to Backstage cont. 5 subPath: postgres-ca.pem - mountPath: /opt/app-root/src/postgres-key.key name: postgres-key # inject TLS private key to Backstage cont. 6 subPath: postgres-key.key extraVolumes: - ephemeral: volumeClaimTemplate: spec: accessModes: - ReadWriteOnce resources: requests: storage: 1Gi name: dynamic-plugins-root - configMap: defaultMode: 420 name: dynamic-plugins optional: true name: dynamic-plugins - name: dynamic-plugins-npmrc secret: defaultMode: 420 optional: true secretName: dynamic-plugins-npmrc - name: postgres-crt secret: secretName: <crt-secret> 7 #", "helm upgrade -n <your-namespace> <your-deploy-name> openshift-helm-charts/redhat-developer-hub -f values.yaml --version 1.2.6", "port-forward -n <your-namespace> <pgsql-pod-name> <forward-to-port>:<forward-from-port>", "port-forward -n developer-hub backstage-psql-developer-hub-0 15432:5432", "#!/bin/bash to_host=<db-service-host> 1 to_port=5432 2 to_user=postgres 3 from_host=127.0.0.1 4 from_port=15432 5 from_user=postgres 6 allDB=(\"backstage_plugin_app\" \"backstage_plugin_auth\" \"backstage_plugin_catalog\" \"backstage_plugin_permission\" \"backstage_plugin_scaffolder\" \"backstage_plugin_search\") 7 for db in USD{!allDB[@]}; do db=USD{allDB[USDdb]} echo Copying database: USDdb PGPASSWORD=USDTO_PSW psql -h USDto_host -p USDto_port -U USDto_user -c \"create database USDdb;\" pg_dump -h USDfrom_host -p USDfrom_port -U USDfrom_user -d USDdb | PGPASSWORD=USDTO_PSW psql -h USDto_host -p USDto_port -U USDto_user -d USDdb done", "/bin/bash TO_PSW=<destination-db-password> /path/to/db_copy.sh 1", "spec: database: enableLocalDb: false application: # extraFiles: secrets: - name: <crt-secret> key: postgres-crt.pem # key name as in <crt-secret> Secret extraEnvs: secrets: - name: <cred-secret>", "-n developer-hub delete pvc <local-psql-pvc-name>", "get pods -n <your-namespace>", "apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: backstage-read-only rules: - apiGroups: - '*' resources: - pods - configmaps - services - deployments - replicasets - horizontalpodautoscalers - ingresses - statefulsets - limitranges - resourcequotas - daemonsets verbs: - get - list - watch #", "kind: ConfigMap apiVersion: v1 metadata: name: dynamic-plugins-rhdh data: dynamic-plugins.yaml: | includes: - dynamic-plugins.default.yaml plugins: - package: 
./dynamic-plugins/dist/backstage-plugin-kubernetes-backend-dynamic disabled: false 1 - package: ./dynamic-plugins/dist/backstage-plugin-kubernetes disabled: false 2 #", "kind: ConfigMap apiVersion: v1 metadata: name: app-config-rhdh data: \"app-config-rhdh.yaml\": | # catalog: rules: - allow: [Component, System, API, Resource, Location] providers: kubernetes: openshift: cluster: openshift processor: namespaceOverride: default defaultOwner: guests schedule: frequency: seconds: 30 timeout: seconds: 5 kubernetes: serviceLocatorMethod: type: 'multiTenant' clusterLocatorMethods: - type: 'config' clusters: - url: <target-cluster-api-server-url> 1 name: openshift authProvider: 'serviceAccount' skipTLSVerify: false 2 skipMetricsLookup: true dashboardUrl: <target-cluster-console-url> 3 dashboardApp: openshift serviceAccountToken: USD{K8S_SERVICE_ACCOUNT_TOKEN} 4 caData: USD{K8S_CONFIG_CA_DATA} 5 #", "-n rhdh-operator get pods -w", "global: dynamic: plugins: - package: './dynamic-plugins/dist/janus-idp-backstage-plugin-analytics-provider-segment' disabled: true", "kind: ConfigMap apiVersion: v1 metadata: name: dynamic-plugins-rhdh data: dynamic-plugins.yaml: | includes: - dynamic-plugins.default.yaml plugins: - package: './dynamic-plugins/dist/janus-idp-backstage-plugin-analytics-provider-segment' disabled: true", "spec: application: dynamicPluginsConfigMapName: dynamic-plugins-rhdh", "global: dynamic: plugins: - package: './dynamic-plugins/dist/janus-idp-backstage-plugin-analytics-provider-segment' disabled: false", "kind: ConfigMap apiVersion: v1 metadata: name: dynamic-plugins-rhdh data: dynamic-plugins.yaml: | includes: - dynamic-plugins.default.yaml plugins: - package: './dynamic-plugins/dist/janus-idp-backstage-plugin-analytics-provider-segment' disabled: false", "spec: application: dynamicPluginsConfigMapName: dynamic-plugins-rhdh", "upstream: backstage: extraEnvVars: - name: SEGMENT_WRITE_KEY value: <segment_key> 1", "spec: application: extraEnvs: envs: - name: SEGMENT_WRITE_KEY value: <segment_key> 1", "upstream: metrics: serviceMonitor: enabled: true path: /metrics", "apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: name: <custom_resource_name> 1 namespace: <project_name> 2 labels: app.kubernetes.io/instance: <custom_resource_name> app.kubernetes.io/name: backstage spec: namespaceSelector: matchNames: - <project_name> selector: matchLabels: rhdh.redhat.com/app: backstage-<custom_resource_name> endpoints: - port: backend path: '/metrics'", "apply -f <filename>", "upstream: backstage: extraEnvVars: - name: HTTP_PROXY value: '<http_proxy_url>' - name: HTTPS_PROXY value: '<https_proxy_url>' - name: NO_PROXY value: '<no_proxy_settings>'", "upstream: backstage: extraEnvVars: - name: HTTP_PROXY value: 'http://10.10.10.105:3128' - name: HTTPS_PROXY value: 'http://10.10.10.106:3128' - name: NO_PROXY value: 'localhost,example.org'", "Other fields omitted deployment.yaml: |- apiVersion: apps/v1 kind: Deployment spec: template: spec: # Other fields omitted initContainers: - name: install-dynamic-plugins # command omitted env: - name: NPM_CONFIG_USERCONFIG value: /opt/app-root/src/.npmrc.dynamic-plugins - name: HTTP_PROXY value: 'http://10.10.10.105:3128' - name: HTTPS_PROXY value: 'http://10.10.10.106:3128' - name: NO_PROXY value: 'localhost,example.org' # Other fields omitted containers: - name: backstage-backend # Other fields omitted env: - name: APP_CONFIG_backend_listen_port value: \"7007\" - name: HTTP_PROXY value: 'http://10.10.10.105:3128' - name: HTTPS_PROXY value: 
'http://10.10.10.106:3128' - name: NO_PROXY value: 'localhost,example.org'", "spec: # Other fields omitted application: extraEnvs: envs: - name: HTTP_PROXY value: 'http://10.10.10.105:3128' - name: HTTPS_PROXY value: 'http://10.10.10.106:3128' - name: NO_PROXY value: 'localhost,example.org'", "upstream: backstage: # --- TRUNCATED --- podAnnotations: prometheus.io/scrape: 'true' prometheus.io/path: '/metrics' prometheus.io/port: '7007' prometheus.io/scheme: 'http'", "Update OPERATOR_NS accordingly OPERATOR_NS=rhdh-operator edit configmap backstage-default-config -n \"USD{OPERATOR_NS}\"", "deployment.yaml: |- apiVersion: apps/v1 kind: Deployment # --- truncated --- spec: template: # --- truncated --- metadata: labels: rhdh.redhat.com/app: # placeholder for 'backstage-<cr-name>' # --- truncated --- annotations: prometheus.io/scrape: 'true' prometheus.io/path: '/metrics' prometheus.io/port: '7007' prometheus.io/scheme: 'http' # --- truncated ---", "--namespace=prometheus port-forward deploy/prometheus-server 9090", "upstream: backstage: # --- Truncated --- extraEnvVars: - name: LOG_LEVEL value: debug", "spec: # Other fields omitted application: extraEnvs: envs: - name: LOG_LEVEL value: debug", "fields @timestamp, @message, kubernetes.container_name | filter kubernetes.container_name in [\"install-dynamic-plugins\", \"backstage-backend\"]", "apiVersion: v1 kind: ConfigMap metadata: name: app-config-rhdh data: \"app-config-rhdh.yaml\": | # --- Truncated --- app: title: Red Hat Developer Hub signInPage: oidc auth: environment: production session: secret: USD{AUTH_SESSION_SECRET} providers: oidc: production: clientId: USD{AWS_COGNITO_APP_CLIENT_ID} clientSecret: USD{AWS_COGNITO_APP_CLIENT_SECRET} metadataUrl: USD{AWS_COGNITO_APP_METADATA_URL} callbackUrl: USD{AWS_COGNITO_APP_CALLBACK_URL} scope: 'openid profile email' prompt: auto", "apiVersion: v1 kind: Secret metadata: name: secrets-rhdh stringData: AUTH_SESSION_SECRET: \"my super auth session secret - change me!!!\" AWS_COGNITO_APP_CLIENT_ID: \"my-aws-cognito-app-client-id\" AWS_COGNITO_APP_CLIENT_SECRET: \"my-aws-cognito-app-client-secret\" AWS_COGNITO_APP_METADATA_URL: \"https://cognito-idp.[region].amazonaws.com/[userPoolId]/.well-known/openid-configuration\" AWS_COGNITO_APP_CALLBACK_URL: \"https://[rhdh_dns]/api/auth/oidc/handler/frame\"", "upstream: backstage: image: pullSecrets: - rhdh-pull-secret podSecurityContext: fsGroup: 2000 extraAppConfig: - filename: app-config-rhdh.yaml configMapRef: app-config-rhdh extraEnvVarsSecrets: - secrets-rhdh", "helm upgrade rhdh openshift-helm-charts/redhat-developer-hub [--version 1.2.6] --values /path/to/values.yaml", "apiVersion: v1 kind: ConfigMap metadata: name: app-config-rhdh data: \"app-config-rhdh.yaml\": | # --- Truncated --- signInPage: oidc auth: # Production to disable guest user login environment: production # Providing an auth.session.secret is needed because the oidc provider requires session support. session: secret: USD{AUTH_SESSION_SECRET} providers: oidc: production: # See https://github.com/backstage/backstage/blob/master/plugins/auth-backend-module-oidc-provider/config.d.ts clientId: USD{AWS_COGNITO_APP_CLIENT_ID} clientSecret: USD{AWS_COGNITO_APP_CLIENT_SECRET} metadataUrl: USD{AWS_COGNITO_APP_METADATA_URL} callbackUrl: USD{AWS_COGNITO_APP_CALLBACK_URL} # Minimal set of scopes needed. Feel free to add more if needed. scope: 'openid profile email' # Note that by default, this provider will use the 'none' prompt which assumes that your are already logged on in the IDP. 
# You should set prompt to: # - auto: will let the IDP decide if you need to log on or if you can skip login when you have an active SSO session # - login: will force the IDP to always present a login form to the user prompt: auto", "apiVersion: v1 kind: Secret metadata: name: secrets-rhdh stringData: # --- Truncated --- # TODO: Change auth session secret. AUTH_SESSION_SECRET: \"my super auth session secret - change me!!!\" # TODO: user pool app client ID AWS_COGNITO_APP_CLIENT_ID: \"my-aws-cognito-app-client-id\" # TODO: user pool app client Secret AWS_COGNITO_APP_CLIENT_SECRET: \"my-aws-cognito-app-client-secret\" # TODO: Replace region and user pool ID AWS_COGNITO_APP_METADATA_URL: \"https://cognito-idp.[region].amazonaws.com/[userPoolId]/.well-known/openid-configuration\" # TODO: Replace <rhdh_dns> AWS_COGNITO_APP_CALLBACK_URL: \"https://[rhdh_dns]/api/auth/oidc/handler/frame\"", "apiVersion: rhdh.redhat.com/v1alpha1 kind: Backstage metadata: # TODO: this the name of your Developer Hub instance name: my-rhdh spec: application: imagePullSecrets: - \"rhdh-pull-secret\" route: enabled: false appConfig: configMaps: - name: \"app-config-rhdh\" extraEnvs: secrets: - name: \"secrets-rhdh\"", "delete deployment -l app.kubernetes.io/instance=<CR_NAME>", "az aks create/update --resource-group <your-ResourceGroup> --name <your-Cluster> --enable-azure-monitor-metrics", "auth: environment: production providers: microsoft: production: clientId: USD{AZURE_CLIENT_ID} clientSecret: USD{AZURE_CLIENT_SECRET} tenantId: USD{AZURE_TENANT_ID} domainHint: USD{AZURE_TENANT_ID} additionalScopes: - Mail.Send", "-n <your_namespace> apply -f <app-config>.yaml", "stringData: AZURE_CLIENT_ID: <value-of-clientId> AZURE_CLIENT_SECRET: <value-of-clientSecret> AZURE_TENANT_ID: <value-of-tenantId>", "-n <your_namespace> apply -f <azure-secrets>.yaml", "upstream: backstage: extraAppConfig: - filename: configMapRef: <app-config-containing-azure> extraEnvVarsSecrets: - <secret-containing-azure>", "helm -n <your_namespace> upgrade -f <your-values.yaml> <your_deploy_name> redhat-developer/backstage --version 1.2.6", "-n <your_namespace> delete pods -l backstage.io/app=backstage-<your-rhdh-cr>", "auth: environment: production providers: microsoft: production: clientId: USD{AZURE_CLIENT_ID} clientSecret: USD{AZURE_CLIENT_SECRET} tenantId: USD{AZURE_TENANT_ID} domainHint: USD{AZURE_TENANT_ID} additionalScopes: - Mail.Send", "-n <your_namespace> apply -f <app-config>.yaml", "stringData: AZURE_CLIENT_ID: <value-of-clientId> AZURE_CLIENT_SECRET: <value-of-clientSecret> AZURE_TENANT_ID: <value-of-tenantId>", "-n <your_namespace> apply -f <azure-secrets>.yaml", "apiVersion: rhdh.redhat.com/v1alpha1 kind: Backstage metadata: name: <your-rhdh-cr> spec: application: imagePullSecrets: - rhdh-pull-secret route: enabled: false appConfig: configMaps: - name: <app-config-containing-azure> extraEnvs: secrets: - name: <secret-containing-azure>", "-n <your_namespace> apply -f rhdh.yaml", "-n <your_namespace> delete pods -l backstage.io/app=backstage-<your-rhdh-cr>", "ui:options: allowedHosts: - github.com", "apiVersion: scaffolder.backstage.io/v1beta3 kind: Template metadata: name: template-name 1 title: Example template 2 description: An example template for v1beta3 scaffolder. 
3 spec: owner: backstage/techdocs-core 4 type: service 5 parameters: 6 - title: Fill in some steps required: - name properties: name: title: Name type: string description: Unique name of the component owner: title: Owner type: string description: Owner of the component - title: Choose a location required: - repoUrl properties: repoUrl: title: Repository Location type: string steps: 7 - id: fetch-base name: Fetch Base action: fetch:template # output: 8 links: - title: Repository 9 url: USD{{ steps['publish'].output.remoteUrl }} - title: Open in catalog 10 icon: catalog entityRef: USD{{ steps['register'].output.entityRef }}", "catalog: rules: - allow: [Template] 1 locations: - type: url 2 target: https://<repository_url>/example-template.yaml 3", "apiVersion: objectbucket.io/v1alpha1 kind: ObjectBucketClaim metadata: name: <rhdh_bucket_claim_name> spec: generateBucketName: <rhdh_bucket_claim_name> storageClassName: openshift-storage.noobaa.io", "upstream: backstage: extraEnvVarsSecrets: - <rhdh_bucket_claim_name> extraEnvVarsCM: - <rhdh_bucket_claim_name>", "global: dynamic: includes: - 'dynamic-plugins.default.yaml' plugins: - disabled: false package: ./dynamic-plugins/dist/backstage-plugin-techdocs-backend-dynamic pluginConfig: techdocs: builder: external generator: runIn: local publisher: awsS3: bucketName: 'USD{BUCKET_NAME}' credentials: accessKeyId: 'USD{AWS_ACCESS_KEY_ID}' secretAccessKey: 'USD{AWS_SECRET_ACCESS_KEY}' endpoint: 'https://USD{BUCKET_HOST}' region: 'USD{BUCKET_REGION}' s3ForcePathStyle: true type: awsS3", "apiVersion: objectbucket.io/v1alpha1 kind: Backstage metadata: name: <name> spec: application: extraEnvs: configMaps: - name: <rhdh_bucket_claim_name> secrets: - name: <rhdh_bucket_claim_name>", "kind: ConfigMap apiVersion: v1 metadata: name: dynamic-plugins-rhdh data: dynamic-plugins.yaml: | includes: - dynamic-plugins.default.yaml plugins: - disabled: false package: ./dynamic-plugins/dist/backstage-plugin-techdocs-backend-dynamic pluginConfig: techdocs: builder: external generator: runIn: local publisher: awsS3: bucketName: 'USD{BUCKET_NAME}' credentials: accessKeyId: 'USD{AWS_ACCESS_KEY_ID}' secretAccessKey: 'USD{AWS_SECRET_ACCESS_KEY}' endpoint: 'https://USD{BUCKET_HOST}' region: 'USD{BUCKET_REGION}' s3ForcePathStyle: true type: awsS3", "Prepare REPOSITORY_URL='https://github.com/org/repo' git clone USDREPOSITORY_URL cd repo Install @techdocs/cli, mkdocs and mkdocs plugins npm install -g @techdocs/cli pip install \"mkdocs-techdocs-core==1.*\" Generate techdocs-cli generate --no-docker Publish techdocs-cli publish --publisher-type awsS3 --storage-name <bucket/container> --entity <Namespace/Kind/Name>", "git clone <https://path/to/docs-repository/>", "npm install -g npx", "npm install -g @techdocs/cli", "pip install \"mkdocs-techdocs-core==1.*\"", "npx @techdocs/cli generate --no-docker --source-dir <path_to_repo> --output-dir ./site", "npx @techdocs/cli publish --publisher-type <awsS3|googleGcs> --storage-name <bucket/container> --entity <namespace/kind/name> --directory ./site", "name: Publish TechDocs Site on: push: branches: [main] # You can even set it to run only when TechDocs related files are updated. # paths: # - \"docs/**\" # - \"mkdocs.yml\" jobs: publish-techdocs-site: runs-on: ubuntu-latest # The following secrets are required in your CI environment for publishing files to AWS S3. # e.g. You can use GitHub Organization secrets to set them for all existing and new repositories. 
env: TECHDOCS_S3_BUCKET_NAME: USD{{ secrets.TECHDOCS_S3_BUCKET_NAME }} AWS_ACCESS_KEY_ID: USD{{ secrets.AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: USD{{ secrets.AWS_SECRET_ACCESS_KEY }} AWS_REGION: USD{{ secrets.AWS_REGION }} ENTITY_NAMESPACE: 'default' ENTITY_KIND: 'Component' ENTITY_NAME: 'my-doc-entity' # In a Software template, Scaffolder will replace {{cookiecutter.component_id | jsonify}} # with the correct entity name. This is same as metadata.name in the entity's catalog-info.yaml # ENTITY_NAME: '{{ cookiecutter.component_id | jsonify }}' steps: - name: Checkout code uses: actions/checkout@v3 - uses: actions/setup-node@v3 - uses: actions/setup-python@v4 with: python-version: '3.9' - name: Install techdocs-cli run: sudo npm install -g @techdocs/cli - name: Install mkdocs and mkdocs plugins run: python -m pip install mkdocs-techdocs-core==1.* - name: Generate docs site run: techdocs-cli generate --no-docker --verbose - name: Publish docs site run: techdocs-cli publish --publisher-type awsS3 --storage-name USDTECHDOCS_S3_BUCKET_NAME --entity USDENTITY_NAMESPACE/USDENTITY_KIND/USDENTITY_NAME" ]
https://docs.redhat.com/en/documentation/red_hat_developer_hub/1.2/html-single/administration_guide_for_red_hat_developer_hub/index
Chapter 1. Load Balancer Overview
Chapter 1. Load Balancer Overview The Load Balancer is a set of integrated software components that provide for balancing IP traffic across a set of real servers. It consists of two main technologies to monitor cluster members and cluster services: Keepalived and HAProxy. Keepalived uses Linux virtual server ( LVS ) to perform load balancing and failover tasks on the active and passive routers, while HAProxy performs load balancing and high-availability services to TCP and HTTP applications. 1.1. keepalived The keepalived daemon runs on both the active and passive LVS routers. All routers running keepalived use the Virtual Router Redundancy Protocol (VRRP). The active router sends VRRP advertisements at periodic intervals; if the backup routers fail to receive these advertisements, a new active router is elected. On the active router, keepalived can also perform load balancing tasks for real servers. Keepalived is the controlling process related to LVS routers. At boot time, the daemon is started by the systemctl command, which reads the configuration file /etc/keepalived/keepalived.conf . On the active router, the keepalived daemon starts the LVS service and monitors the health of the services based on the configured topology. Using VRRP, the active router sends periodic advertisements to the backup routers. On the backup routers, the VRRP instance determines the running status of the active router. If the active router fails to advertise after a user-configurable interval, Keepalived initiates failover. During failover, the virtual servers are cleared. The new active router takes control of the virtual IP address ( VIP ), sends out an ARP message, sets up IPVS table entries (virtual servers), begins health checks, and starts sending VRRP advertisements. Keepalived performs failover on layer 4, or the Transport layer, upon which TCP conducts connection-based data transmissions. When a real server fails to reply to a simple TCP connection check within the timeout period, keepalived detects that the server has failed and removes it from the server pool.
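To make the topology described above concrete, the following is a minimal sketch of an /etc/keepalived/keepalived.conf for the active router; the interface name, authentication password, virtual IP address, and real server address are illustrative assumptions rather than values from this guide:

# VRRP instance for the active router; a backup router uses state BACKUP and a lower priority
vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass changeme
    }
    virtual_ipaddress {
        192.168.10.100
    }
}

# LVS virtual server handled by the active router, with a simple TCP health check
virtual_server 192.168.10.100 80 {
    delay_loop 6
    lb_algo rr
    lb_kind DR
    protocol TCP
    real_server 192.168.10.20 80 {
        TCP_CHECK {
            connect_timeout 10
        }
    }
}

On both routers, the daemon is then enabled and started with systemctl, and the resulting IPVS table entries on the active router can be inspected with ipvsadm (assuming the ipvsadm package is installed):

systemctl enable --now keepalived
ipvsadm -L -n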
null
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/7/html/load_balancer_administration/ch-lvs-overview-VSA
Chapter 8. Backing Up Satellite Server and Capsule Server
Chapter 8. Backing Up Satellite Server and Capsule Server You can back up your Satellite deployment to ensure the continuity of your Red Hat Satellite deployment and associated data in the event of a disaster. If your deployment uses custom configurations, you must consider how to handle these custom configurations when you plan your backup and disaster recovery policy. + Note The instances created using the backup tool are not supposed to run in parallel in a production environment. You must decommission any old instances after restoring the backup. To create a backup of your Satellite Server or Capsule Server and all associated data, use the satellite-maintain backup command. Backing up to a separate storage device on a separate system is highly recommended. Satellite services are unavailable during the backup. Therefore, you must ensure that no other tasks are scheduled by other administrators. You can schedule a backup using cron . For more information, see the Section 8.5, "Example of a Weekly Full Backup Followed by Daily Incremental Backups" . During offline or snapshot backups, the services are inactive and Satellite is in a maintenance mode. All the traffic from outside on port 443 is rejected by a firewall to ensure there are no modifications triggered. A backup contains sensitive information from the /root/ssl-build directory. For example, it can contain hostnames, ssh keys, request files and SSL certificates. You must encrypt or move the backup to a secure location to minimize the risk of damage or unauthorized access to the hosts. Conventional Backup Methods You can also use conventional backup methods. For more information, see System Backup and Recovery in the Red Hat Enterprise Linux 7 System Administrator's Guide . Note If you plan to use the satellite-maintain backup command to create a backup, do not stop Satellite services. When creating a snapshot or conventional backup, you must stop all services as follows: Start the services after creating a snapshot or conventional backup: 8.1. Estimating the Size of a Backup The full backup creates uncompressed archives of PostgreSQL and Pulp database files, and Satellite configuration files. Compression occurs after the archives are created to decrease the time when Satellite services are unavailable. A full backup requires space to store the following data: Uncompressed Satellite database and configuration files Compressed Satellite database and configuration files An extra 20% of the total estimated space to ensure a reliable backup Procedure Enter the du command to estimate the size of uncompressed directories containing Satellite database and configuration files: For Red Hat Enterprise Linux 8: For Red Hat Enterprise Linux 7: Calculate how much space is required to store the compressed data. The following table describes the compression ratio of all data items included in the backup: Table 8.1. Backup Data Compression Ratio for Red Hat Enterprise Linux 8 Data type Directory Ratio Example results PostgreSQL database files /var/lib/pgsql/data 80 - 85% 100 GB 20 GB Pulp RPM files /var/lib/pulp (not compressed) 100 GB Configuration files /var/lib/qpidd /var/lib/tftpboot /etc /root/ssl-build /var/www/html/pub /opt/puppetlabs 85% 942 MB 141 MB Table 8.2. 
Backup Data Compression Ratio for Red Hat Enterprise Linux 7 Data type Directory Ratio Example results PostgreSQL database files /var/opt/rh/rh-postgresql12/lib/pgsql/data 80 - 85% 100 GB 20 GB Pulp RPM files /var/lib/pulp (not compressed) 100 GB Configuration files /var/lib/qpidd /var/lib/tftpboot /etc /root/ssl-build /var/www/html/pub /opt/puppetlabs 85% 942 MB 141 MB In this example, the compressed backup data occupies 120 GB in total. To calculate the amount of available space you require to store a backup, calculate the sum of the estimated values of compressed and uncompressed backup data, and add an extra 20% to ensure a reliable backup. This example requires 201 GB plus 120 GB for the uncompressed and compressed backup data, 321 GB in total. With 64 GB of extra space, 385 GB must be allocated for the backup location. 8.2. Performing a Full Backup of Satellite Server or Capsule Server Red Hat Satellite uses the satellite-maintain backup command to make backups. There are three main methods of backing up Satellite Server: Offline backup Online backup Snapshot backups For more information about each of these methods, you can view the usage statements for each backup method. Offline backups Online backups Snapshot backups Directory creation The satellite-maintain backup command creates a time-stamped subdirectory in the backup directory that you specify. The satellite-maintain backup command does not overwrite backups, therefore you must select the correct directory or subdirectory when restoring from a backup or an incremental backup. The satellite-maintain backup command stops and restarts services as required. When you run the satellite-maintain backup offline command, the following default backup directories are created: satellite-backup on Satellite foreman-proxy-backup on Capsule If you want to set a custom directory name, add the --preserve-directory option and add a directory name. The backup is then stored in the directory you provide in the command line. If you use the --preserve-directory option, no data is removed if the backup fails. Note that if you use a local PostgreSQL database, the postgres user requires write access to the backup directory. Remote databases You can use the satellite-maintain backup command to back up remote databases. You can use both online and offline methods to back up remote databases, but if you use offline methods, such as snapshot, the satellite-maintain backup command performs a database dump. Prerequisites Ensure that your backup location has sufficient available disk space to store the backup. For more information, see Section 8.1, "Estimating the Size of a Backup" . Warning Request other users of Satellite Server or Capsule Server to save any changes and warn them that Satellite services are unavailable for the duration of the backup. Ensure no other tasks are scheduled for the same time as the backup. Procedure On Satellite Server, enter the following command: On Capsule Server, enter the following command: 8.3. Performing a Backup without Pulp Content You can perform an offline backup that excludes the contents of the Pulp directory. The backup without Pulp content is useful for debugging purposes and is only intended to provide access to configuration files without backing up the Pulp database. For production use cases, do not restore from a directory that does not contain Pulp content.
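Before restoring, you can do a quick sanity check that a backup directory actually includes Pulp content. This is only a sketch: it assumes the default time-stamped satellite-backup-* directory naming and the pulp_data.tar archive name, and the /var/backup_directory path is illustrative:

# List the Pulp data archive in each backup subdirectory; an error suggests the backup was taken with --skip-pulp-content
ls -lh /var/backup_directory/satellite-backup-*/pulp_data.tar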
Warning Request other users of Satellite Server or Capsule Server to save any changes and warn them that Satellite services are unavailable for the duration of the backup. Ensure no other tasks are scheduled for the same time as the backup. Prerequisites Ensure that your backup location has sufficient available disk space to store the backup. For more information, see Section 8.1, "Estimating the Size of a Backup" . Procedure To perform an offline backup without Pulp content, enter the following command: 8.4. Performing an Incremental Backup Use this procedure to perform an offline backup of any changes since a backup. To perform incremental backups, you must perform a full backup as a reference to create the first incremental backup of a sequence. Keep the most recent full backup and a complete sequence of incremental backups to restore from. Warning Request other users of Satellite Server or Capsule Server to save any changes and warn them that Satellite services are unavailable for the duration of the backup. Ensure no other tasks are scheduled for the same time as the backup. Prerequisites Ensure that your backup location has sufficient available disk space to store the backup. For more information, see Section 8.1, "Estimating the Size of a Backup" . Procedure To perform a full offline backup, enter the following command: To create a directory within your backup directory to store the first incremental back up, enter the satellite-maintain backup command with the --incremental option: To create the second incremental backup, enter the satellite-maintain backup command with the --incremental option and include the path to the first incremental backup to indicate the starting point for the increment. This creates a directory for the second incremental backup in your backup directory: Optional: If you want to point to a different version of the backup, and make a series of increments with that version of the backup as the starting point, you can do this at any time. For example, if you want to make a new incremental backup from the full backup rather than the first or second incremental backup, point to the full backup directory: 8.5. Example of a Weekly Full Backup Followed by Daily Incremental Backups The following script performs a full backup on a Sunday followed by incremental backups for each of the following days. A new subdirectory is created for each day that an incremental backup is performed. The script requires a daily cron job. Note that the satellite-maintain backup command requires /sbin and /usr/sbin directories to be in PATH and the --assumeyes option is used to skip the confirmation prompt. 8.6. Performing an Online Backup Perform an online backup only for debugging purposes. Risks Associated with Online Backups When performing an online backup, if there are procedures affecting the Pulp database, the Pulp part of the backup procedure repeats until it is no longer being altered. Because the backup of the Pulp database is the most time consuming part of backing up Satellite, if you make a change that alters the Pulp database during this time, the backup procedure keeps restarting. For production environments, use the snapshot method. For more information, see Section 8.7, "Performing a Snapshot Backup" . If you want to use the online backup method in production, proceed with caution and ensure that no modifications occur during the backup. 
Warning Request other users of Satellite Server or Capsule Server to save any changes and warn them that Satellite services are unavailable for the duration of the backup. Ensure no other tasks are scheduled for the same time as the backup. Prerequisites Ensure that your backup location has sufficient available disk space to store the backup. For more information, see Section 8.1, "Estimating the Size of a Backup" . Procedure To perform an online backup, enter the following command: 8.7. Performing a Snapshot Backup You can perform a snapshot backup that uses Logical Volume Manager (LVM) snapshots of the Pulp, and PostgreSQL directories. Creating a backup from LVM snapshots mitigates the risk of an inconsistent backup. The snapshot backup method is faster than a full offline backup and therefore reduces Satellite downtime. To view the usage statement, enter the following command: Warning Request other Satellite Server or Capsule Server users to save any changes and warn them that Satellite services are unavailable for the duration of the backup. Ensure no other tasks are scheduled for the same time as the backup. Prerequisites The system uses LVM for the directories that you snapshot: /var/lib/pulp/ , and /var/opt/rh/rh-postgresql12/lib/pgsql . The free disk space in the relevant volume group (VG) is three times the size of the snapshot. More precisely, the VG must have enough space unreserved by the member logical volumes (LVs) to accommodate new snapshots. In addition, one of the LVs must have enough free space for the backup directory. The target backup directory is on a different LV than the directories that you snapshot. Procedure To perform a snapshot backup, enter the satellite-maintain backup snapshot command: The satellite-maintain backup snapshot command creates snapshots when the services are active, and stops all services which can impact the backup. This makes the maintenance window shorter. After the successful snapshot, all services are restarted and LVM snapshots are removed. 8.8. White-listing and Skipping Steps When Performing Backups A backup using the satellite-maintain backup command proceeds in a sequence of steps. To skip part of the backup add the --whitelist option to the command and add the step label that you want to omit. Procedure To display a list of available step labels, enter the following command: To skip a step of the backup, enter the satellite-maintain backup command with the --whitelist option. For example:
[ "satellite-maintain service stop", "satellite-maintain service start", "du -sh /var/lib/pgsql/data /var/lib/pulp 100G /var/lib/pgsql/data 100G /var/lib/pulp du -csh /var/lib/qpidd /var/lib/tftpboot /etc /root/ssl-build /var/www/html/pub /opt/puppetlabs 886M /var/lib/qpidd 16M /var/lib/tftpboot 37M /etc 900K /root/ssl-build 100K /var/www/html/pub 2M /opt/puppetlabs 942M total", "du -sh /var/opt/rh/rh-postgresql12/lib/pgsql/data /var/lib/pulp 100G /var/opt/rh/rh-postgresql12/lib/pgsql/data 100G /var/lib/pulp du -csh /var/lib/qpidd /var/lib/tftpboot /etc /root/ssl-build /var/www/html/pub /opt/puppetlabs 886M /var/lib/qpidd 16M /var/lib/tftpboot 37M /etc 900K /root/ssl-build 100K /var/www/html/pub 2M /opt/puppetlabs 942M total", "satellite-maintain backup offline --help", "satellite-maintain backup online --help", "satellite-maintain backup snapshot --help", "satellite-maintain backup offline /var/satellite-backup", "satellite-maintain backup offline /var/foreman-proxy-backup", "satellite-maintain backup offline --skip-pulp-content /var/backup_directory", "satellite-maintain backup offline /var/backup_directory", "satellite-maintain backup offline --incremental /var/backup_directory/full_backup /var/backup_directory", "satellite-maintain backup offline --incremental /var/backup_directory/first_incremental_backup /var/backup_directory", "satellite-maintain backup offline --incremental /var/backup_directory/full_backup /var/backup_directory", "#!/bin/bash -e PATH=/sbin:/bin:/usr/sbin:/usr/bin DESTINATION=/var/backup_directory if [[ USD(date +%w) == 0 ]]; then satellite-maintain backup offline --assumeyes USDDESTINATION else LAST=USD(ls -td -- USDDESTINATION/*/ | head -n 1) satellite-maintain backup offline --assumeyes --incremental \"USDLAST\" USDDESTINATION fi exit 0", "satellite-maintain backup online /var/backup_directory", "satellite-maintain backup snapshot -h", "satellite-maintain backup snapshot /var/backup_directory", "satellite-maintain advanced procedure run -h", "satellite-maintain backup online --whitelist backup-metadata -y /var/backup_directory" ]
https://docs.redhat.com/en/documentation/red_hat_satellite/6.11/html/administering_red_hat_satellite/Backing_Up_Server_and_Proxy_admin
Chapter 5. Using Red Hat entitlements in pipelines
Chapter 5. Using Red Hat entitlements in pipelines If you have Red Hat Enterprise Linux (RHEL) entitlements, you can use these entitlements to build container images in your pipelines. The Insights Operator automatically manages your entitlements after you import them into this operator from simple content access (SCA). This operator provides a secret named etc-pki-entitlement in the openshift-config-managed namespace. You can use Red Hat entitlements in your pipelines in one of the following two ways: Manually copy the secret into the namespace of the pipeline. This method is least complex if you have a limited number of pipeline namespaces. Use the Shared Resources Container Storage Interface (CSI) Driver Operator to share the secret between namespaces automatically. 5.1. Prerequisites You are logged in to your OpenShift Container Platform cluster using the oc command line tool. You enabled the Insights Operator feature on your OpenShift Container Platform cluster. If you want to use the Shared Resources CSI Driver operator to share the secret between namespaces, you must also enable the Shared Resources CSI driver. For information about enabling features, including the Insights Operator and Shared Resources CSI Driver, see Enabling features using feature gates . Note After you enable the Insights Operator, you must wait for some time to ensure that the cluster updates all the nodes with this operator. You can monitor the status of all nodes by entering the following command: USD oc get nodes -w To verify that the Insights Operator is active, check that the insights-operator pod is running in the openshift-insights namespace by entering the following command: USD oc get pods -n openshift-insights You configured the importing of your Red Hat entitlements into the Insights Operator. For information about importing the entitlements, see Importing simple content access entitlements with Insights Operator . Note To verify that the Insights Operator made your entitlements available, check that the etc-pki-entitlement secret is present in the openshift-config-managed namespace by entering the following command: USD oc get secret etc-pki-entitlement -n openshift-config-managed 5.2. Using Red Hat entitlements by manually copying the etc-pki-entitlement secret You can copy the etc-pki-entitlement secret from the openshift-config-managed namespace into the namespace of your pipeline. You can then configure your pipeline to use this secret for the Buildah task. Prerequisites You installed the jq package on your system. This package is available in Red Hat Enterprise Linux (RHEL). Procedure Copy the etc-pki-entitlement secret from the openshift-config-managed namespace into the namespace of your pipeline by running the following command: USD oc get secret etc-pki-entitlement -n openshift-config-managed -o json | \ jq 'del(.metadata.resourceVersion)' | jq 'del(.metadata.creationTimestamp)' | \ jq 'del(.metadata.uid)' | jq 'del(.metadata.namespace)' | \ oc -n <pipeline_namespace> create -f - 1 1 Replace <pipeline_namespace> with the namespace of your pipeline. In your Buildah task definition, use the buildah task provided in the openshift-pipelines namespace or a copy of this task and define the rhel-entitlement workspace, as shown in the following example. In your task run or pipeline run that runs the Buildah task, assign the etc-pki-entitlement secret to the rhel-entitlement workspace, as in the following example.
Example pipeline run definition, including the pipeline and task definitions, that uses Red Hat entitlements apiVersion: tekton.dev/v1 kind: PipelineRun metadata: name: buildah-pr-test spec: workspaces: - name: shared-workspace volumeClaimTemplate: spec: accessModes: - ReadWriteOnce resources: requests: storage: 1Gi - name: dockerconfig secret: secretName: regred - name: rhel-entitlement 1 secret: secretName: etc-pki-entitlement pipelineSpec: workspaces: - name: shared-workspace - name: dockerconfig - name: rhel-entitlement 2 tasks: # ... - name: buildah taskRef: resolver: cluster params: - name: kind value: task - name: name value: buildah - name: namespace value: openshift-pipelines workspaces: - name: source workspace: shared-workspace - name: dockerconfig workspace: dockerconfig - name: rhel-entitlement 3 workspace: rhel-entitlement params: - name: IMAGE value: <image_where_you_want_to_push> 1 The definition of the rhel-entitlement workspace in the pipeline run, assigning the etc-pki-entitlement secret to the workspace 2 The definition of the rhel-entitlement workspace in the pipeline definition 3 The definition of the rhel-entitlement workspace in the task definition 5.3. Using Red Hat entitlements by sharing the secret using the Shared Resources CSI driver operator You can set up sharing of the etc-pki-entitlement secret from the openshift-config-managed namespace to other namespaces using the Shared Resources Container Storage Interface (CSI) Driver Operator. You can then configure your pipeline to use this secret for the Buildah task. Prerequisites You are logged on to your OpenShift Container Platform cluster using the oc command line utility as a user with cluster administrator permissions. You enabled the Shared Resources CSI Driver operator on your OpenShift Container Platform cluster. Procedure Create a SharedSecret custom resource (CR) for sharing the etc-pki-entitlement secret by running the following command: USD oc apply -f - <<EOF apiVersion: sharedresource.openshift.io/v1alpha1 kind: SharedSecret metadata: name: shared-rhel-entitlement spec: secretRef: name: etc-pki-entitlement namespace: openshift-config-managed EOF Create an RBAC role that permits access to the shared secret by running the following command: USD oc apply -f - <<EOF apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: shared-resource-rhel-entitlement namespace: <pipeline_namespace> 1 rules: - apiGroups: - sharedresource.openshift.io resources: - sharedsecrets resourceNames: - shared-rhel-entitlement verbs: - use EOF 1 Replace <pipeline_namespace> with the namespace of your pipeline. Assign the role to the pipeline service account by running the following command: USD oc create rolebinding shared-resource-rhel-entitlement --role=shared-shared-resource-rhel-entitlement \ --serviceaccount=<pipeline-namespace>:pipeline 1 1 Replace <pipeline-namespace> with the namespace of your pipeline. Note If you changed the default service account for OpenShift Pipelines or if you define a custom service account in the pipeline run or task run, assign the role to this account instead of the pipeline account. In your Buildah task definition, use the buildah task provided in the openshift-pipelines namespace or a copy of this task and define the rhel-entitlement workspace, as shown in the following example. In your task run or pipeline run that runs the Buildah task, assign the shared secret to the rhel-entitlement workspace, as in the following example. 
Example pipeline run definition, including the pipeline and task definitions, that uses Red Hat entitlements apiVersion: tekton.dev/v1 kind: PipelineRun metadata: name: buildah-pr-test-csi spec: workspaces: - name: shared-workspace volumeClaimTemplate: spec: accessModes: - ReadWriteOnce resources: requests: storage: 1Gi - name: dockerconfig secret: secretName: regred - name: rhel-entitlement 1 csi: readOnly: true driver: csi.sharedresource.openshift.io volumeAttributes: sharedSecret: shared-rhel-entitlement pipelineSpec: workspaces: - name: shared-workspace - name: dockerconfig - name: rhel-entitlement 2 tasks: # ... - name: buildah taskRef: resolver: cluster params: - name: kind value: task - name: name value: buildah - name: namespace value: openshift-pipelines workspaces: - name: source workspace: shared-workspace - name: dockerconfig workspace: dockerconfig - name: rhel-entitlement 3 workspace: rhel-entitlement params: - name: IMAGE value: <image_where_you_want_to_push> 1 The definition of the rhel-entitlement workspace in the pipeline run, assigning the shared-rhel-entitlement CSI shared secret to the workspace 2 The definition of the rhel-entitlement workspace in the pipeline definition 3 The definition of the rhel-entitlement workspace in the task definition 5.4. Additional resources Simple content access Using Insights Operator Importing simple content access entitlements with Insights Operator Shared Resource CSI Driver Operator Changing the default service account for OpenShift Pipelines
[ "oc get nodes -w", "oc get pods -n openshift-insights", "oc get secret etc-pki-entitlement -n openshift-config-managed", "oc get secret etc-pki-entitlement -n openshift-config-managed -o json | jq 'del(.metadata.resourceVersion)' | jq 'del(.metadata.creationTimestamp)' | jq 'del(.metadata.uid)' | jq 'del(.metadata.namespace)' | oc -n <pipeline_namespace> create -f - 1", "apiVersion: tekton.dev/v1 kind: PipelineRun metadata: name: buildah-pr-test spec: workspaces: - name: shared-workspace volumeClaimTemplate: spec: accessModes: - ReadWriteOnce resources: requests: storage: 1Gi - name: dockerconfig secret: secretName: regred - name: rhel-entitlement 1 secret: secretName: etc-pki-entitlement pipelineSpec: workspaces: - name: shared-workspace - name: dockerconfig - name: rhel-entitlement 2 tasks: - name: buildah taskRef: resolver: cluster params: - name: kind value: task - name: name value: buildah - name: namespace value: openshift-pipelines workspaces: - name: source workspace: shared-workspace - name: dockerconfig workspace: dockerconfig - name: rhel-entitlement 3 workspace: rhel-entitlement params: - name: IMAGE value: <image_where_you_want_to_push>", "oc apply -f - <<EOF apiVersion: sharedresource.openshift.io/v1alpha1 kind: SharedSecret metadata: name: shared-rhel-entitlement spec: secretRef: name: etc-pki-entitlement namespace: openshift-config-managed EOF", "oc apply -f - <<EOF apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: shared-resource-rhel-entitlement namespace: <pipeline_namespace> 1 rules: - apiGroups: - sharedresource.openshift.io resources: - sharedsecrets resourceNames: - shared-rhel-entitlement verbs: - use EOF", "oc create rolebinding shared-resource-rhel-entitlement --role=shared-shared-resource-rhel-entitlement --serviceaccount=<pipeline-namespace>:pipeline 1", "apiVersion: tekton.dev/v1 kind: PipelineRun metadata: name: buildah-pr-test-csi spec: workspaces: - name: shared-workspace volumeClaimTemplate: spec: accessModes: - ReadWriteOnce resources: requests: storage: 1Gi - name: dockerconfig secret: secretName: regred - name: rhel-entitlement 1 csi: readOnly: true driver: csi.sharedresource.openshift.io volumeAttributes: sharedSecret: shared-rhel-entitlement pipelineSpec: workspaces: - name: shared-workspace - name: dockerconfig - name: rhel-entitlement 2 tasks: - name: buildah taskRef: resolver: cluster params: - name: kind value: task - name: name value: buildah - name: namespace value: openshift-pipelines workspaces: - name: source workspace: shared-workspace - name: dockerconfig workspace: dockerconfig - name: rhel-entitlement 3 workspace: rhel-entitlement params: - name: IMAGE value: <image_where_you_want_to_push>" ]
https://docs.redhat.com/en/documentation/red_hat_openshift_pipelines/1.18/html/creating_cicd_pipelines/using-rh-entitlements-pipelines
3.2. RAID
3.2. RAID 3.2.1. Upgrades Performing an upgrade from a dmraid set to an mdraid set is not supported. A warning will be displayed when an upgrade of this type is attempted. Upgrades from existing mdraid sets and creation of new mdraid sets are possible. The new default superblock can cause problems when upgrading sets. This new superblock format (used on all devices except when creating a RAID1 /boot partition) is now at the beginning of the array, and any file system or LVM data is offset from the beginning of the partition. When the array is not running, LVM and file system mount commands might not detect the device as having a valid volume or file system data. This is intentional, and means that if you want to mount a single disk in a RAID1 array, you need to start the array having only that single disk in it, then mount the array. You cannot mount the bare disk directly. This change has been made as mounting a bare disk directly can silently corrupt the array if a resync is not forced. On subsequent reboots, the RAID system can then consider the disk that was not included in the array as being incompatible, and will disconnect that device from the array. This is also normal. When you are ready to re-add the other disk back into the array, use the mdadm command to hot add the disk into the array, at which point a resync of the changed parts of the disk (if you have write intent bitmaps) or the whole disk (if you have no bitmap) will be performed, and the array will once again be synchronized. From this point, devices will not be disconnected from the array, as the array is considered to be properly assembled. The new superblock supports the concept of named mdraid arrays. Dependency on the old method of array enumeration (for instance, /dev/md0 then /dev/md1 , and so on.) for distinguishing between arrays has been dropped. You can now choose an arbitrary name for the array (such as home , data , or opt ). Create the array with your chosen name using the --name=opt option. Whatever name is given to the array, that name will be created in /dev/md/ (unless a full path is given as a name, in which case that path will be created; or unless you specify a single number, such as 0, and mdadm will start the array using the old /dev/md x scheme). The Anaconda installer does not currently allow for the selection of array names, and instead uses the simple number scheme as a way to emulate how arrays were created in the past. The new mdraid arrays support the use of write intent bitmaps. These help the system identify problematic parts of an array, so that in the event of an unclean shutdown, only the problematic parts need to be resynchronized, and not the entire disk. This drastically reduces the time required to resynchronize. Newly created arrays will automatically have a write intent bitmap added when suitable. For instance, arrays used for swap and very small arrays (such as /boot arrays) do not benefit from having write intent bitmaps. It is possible to add a write intent bitmap to your previously existing arrays after the upgrade is complete using the mdadm --grow command on the device, however write intent bitmaps do incur a modest performance hit (about 3-5% at a bitmap chunk size of 65536, but can increase to 10% or more at small bitmap chunk sizes such as 8192). This means that if a write intent bitmap is added to an array, it is best to keep the chunk size reasonably large. The recommended size is 65536.
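The following commands sketch the operations described above; the device names, the array name, and the bitmap chunk size are illustrative:

# Create a named RAID1 array; the name is exposed under /dev/md/
mdadm --create /dev/md/opt --name=opt --level=1 --raid-devices=2 /dev/sda1 /dev/sdb1
# Hot add a disk back into the running array after it was disconnected
mdadm /dev/md/opt --add /dev/sdb1
# Add a write intent bitmap to an existing array, keeping the chunk size reasonably large
mdadm --grow /dev/md/opt --bitmap=internal --bitmap-chunk=65536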
null
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/6/html/migration_planning_guide/sect-migration_guide-file_systems-raid
Providing feedback on JBoss EAP documentation
Providing feedback on JBoss EAP documentation To report an error or to improve our documentation, log in to your Red Hat Jira account and submit an issue. If you do not have a Red Hat Jira account, then you will be prompted to create an account. Procedure Click the following link to create a ticket . Enter a brief description of the issue in the Summary . Provide a detailed description of the issue or enhancement in the Description . Include a URL to where the issue occurs in the documentation. Clicking Submit creates and routes the issue to the appropriate documentation team.
null
https://docs.redhat.com/en/documentation/red_hat_jboss_enterprise_application_platform/8.0/html/getting_started_with_developing_applications_for_jboss_eap_deployment/proc_providing-feedback-on-red-hat-documentation_default
Part IV. Appendices
Part IV. Appendices This part describes tools and techniques that help identify, analyze, and address potential problems. It also covers best practices for reporting bugs, ensuring that issues are clearly communicated for prompt resolution.
null
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/9/html/interactively_installing_rhel_from_installation_media/appendices
Chapter 22. Multiple networks
Chapter 22. Multiple networks 22.1. Understanding multiple networks In Kubernetes, container networking is delegated to networking plugins that implement the Container Network Interface (CNI). OpenShift Container Platform uses the Multus CNI plugin to allow chaining of CNI plugins. During cluster installation, you configure your default pod network. The default network handles all ordinary network traffic for the cluster. You can define an additional network based on the available CNI plugins and attach one or more of these networks to your pods. You can define more than one additional network for your cluster, depending on your needs. This gives you flexibility when you configure pods that deliver network functionality, such as switching or routing. 22.1.1. Usage scenarios for an additional network You can use an additional network in situations where network isolation is needed, including data plane and control plane separation. Isolating network traffic is useful for the following performance and security reasons: Performance You can send traffic on two different planes to manage how much traffic is along each plane. Security You can send sensitive traffic onto a network plane that is managed specifically for security considerations, and you can separate private data that must not be shared between tenants or customers. All of the pods in the cluster still use the cluster-wide default network to maintain connectivity across the cluster. Every pod has an eth0 interface that is attached to the cluster-wide pod network. You can view the interfaces for a pod by using the oc exec -it <pod_name> -- ip a command. If you add additional network interfaces that use Multus CNI, they are named net1 , net2 , ... , netN . To attach additional network interfaces to a pod, you must create configurations that define how the interfaces are attached. You specify each interface by using a NetworkAttachmentDefinition custom resource (CR). A CNI configuration inside each of these CRs defines how that interface is created. 22.1.2. Additional networks in OpenShift Container Platform OpenShift Container Platform provides the following CNI plugins for creating additional networks in your cluster: bridge : Configure a bridge-based additional network to allow pods on the same host to communicate with each other and the host. host-device : Configure a host-device additional network to allow pods access to a physical Ethernet network device on the host system. ipvlan : Configure an ipvlan-based additional network to allow pods on a host to communicate with other hosts and pods on those hosts, similar to a macvlan-based additional network. Unlike a macvlan-based additional network, each pod shares the same MAC address as the parent physical network interface. vlan : Configure a vlan-based additional network to allow VLAN-based network isolation and connectivity for pods. macvlan : Configure a macvlan-based additional network to allow pods on a host to communicate with other hosts and pods on those hosts by using a physical network interface. Each pod that is attached to a macvlan-based additional network is provided a unique MAC address. tap : Configure a tap-based additional network to create a tap device inside the container namespace. A tap device enables user space programs to send and receive network packets. SR-IOV : Configure an SR-IOV based additional network to allow pods to attach to a virtual function (VF) interface on SR-IOV capable hardware on the host system. 22.2. 
Configuring an additional network As a cluster administrator, you can configure an additional network for your cluster. The following network types are supported: Bridge Host device VLAN IPVLAN MACVLAN TAP OVN-Kubernetes 22.2.1. Approaches to managing an additional network You can manage the lifecycle of an additional network in OpenShift Container Platform by using one of two approaches: modifying the Cluster Network Operator (CNO) configuration or applying a YAML manifest. Each approach is mutually exclusive and you can only use one approach for managing an additional network at a time. For either approach, the additional network is managed by a Container Network Interface (CNI) plugin that you configure. The two different approaches are summarized here: Modifying the Cluster Network Operator (CNO) configuration: Configuring additional networks through CNO is only possible for cluster administrators. The CNO automatically creates and manages the NetworkAttachmentDefinition object. By using this approach, you can define NetworkAttachmentDefinition objects at install time through configuration of the install-config . Applying a YAML manifest: You can manage the additional network directly by creating an NetworkAttachmentDefinition object. Compared to modifying the CNO configuration, this approach gives you more granular control and flexibility when it comes to configuration. Note When deploying OpenShift Container Platform nodes with multiple network interfaces on Red Hat OpenStack Platform (RHOSP) with OVN Kubernetes, DNS configuration of the secondary interface might take precedence over the DNS configuration of the primary interface. In this case, remove the DNS nameservers for the subnet ID that is attached to the secondary interface: USD openstack subnet set --dns-nameserver 0.0.0.0 <subnet_id> 22.2.2. IP address assignment for additional networks For additional networks, IP addresses can be assigned using an IP Address Management (IPAM) CNI plugin, which supports various assignment methods, including Dynamic Host Configuration Protocol (DHCP) and static assignment. The DHCP IPAM CNI plugin responsible for dynamic assignment of IP addresses operates with two distinct components: CNI Plugin : Responsible for integrating with the Kubernetes networking stack to request and release IP addresses. DHCP IPAM CNI Daemon : A listener for DHCP events that coordinates with existing DHCP servers in the environment to handle IP address assignment requests. This daemon is not a DHCP server itself. For networks requiring type: dhcp in their IPAM configuration, ensure the following: A DHCP server is available and running in the environment. The DHCP server is external to the cluster and is expected to be part of the customer's existing network infrastructure. The DHCP server is appropriately configured to serve IP addresses to the nodes. In cases where a DHCP server is unavailable in the environment, it is recommended to use the Whereabouts IPAM CNI plugin instead. The Whereabouts CNI provides similar IP address management capabilities without the need for an external DHCP server. Note Use the Whereabouts CNI plugin when there is no external DHCP server or where static IP address management is preferred. The Whereabouts plugin includes a reconciler daemon to manage stale IP address allocations. A DHCP lease must be periodically renewed throughout the container's lifetime, so a separate daemon, the DHCP IPAM CNI Daemon, is required. 
To deploy the DHCP IPAM CNI daemon, modify the Cluster Network Operator (CNO) configuration to trigger the deployment of this daemon as part of the additional network setup. Additional resources Dynamic IP address (DHCP) assignment configuration Dynamic IP address assignment configuration with Whereabouts 22.2.3. Configuration for an additional network attachment An additional network is configured by using the NetworkAttachmentDefinition API in the k8s.cni.cncf.io API group. Important Do not store any sensitive information or a secret in the NetworkAttachmentDefinition CRD because this information is accessible by the project administration user. The configuration for the API is described in the following table: Table 22.1. NetworkAttachmentDefinition API fields Field Type Description metadata.name string The name for the additional network. metadata.namespace string The namespace that the object is associated with. spec.config string The CNI plugin configuration in JSON format. 22.2.3.1. Configuration of an additional network through the Cluster Network Operator The configuration for an additional network attachment is specified as part of the Cluster Network Operator (CNO) configuration. The following YAML describes the configuration parameters for managing an additional network with the CNO: Cluster Network Operator configuration apiVersion: operator.openshift.io/v1 kind: Network metadata: name: cluster spec: # ... additionalNetworks: 1 - name: <name> 2 namespace: <namespace> 3 rawCNIConfig: |- 4 { ... } type: Raw 1 An array of one or more additional network configurations. 2 The name for the additional network attachment that you are creating. The name must be unique within the specified namespace . 3 The namespace to create the network attachment in. If you do not specify a value then the default namespace is used. Important To prevent namespace issues for the OVN-Kubernetes network plugin, do not name your additional network attachment default , because this namespace is reserved for the default additional network attachment. 4 A CNI plugin configuration in JSON format. 22.2.3.2. Configuration of an additional network from a YAML manifest The configuration for an additional network is specified from a YAML configuration file, such as in the following example: apiVersion: k8s.cni.cncf.io/v1 kind: NetworkAttachmentDefinition metadata: name: <name> 1 spec: config: |- 2 { ... } 1 The name for the additional network attachment that you are creating. 2 A CNI plugin configuration in JSON format. 22.2.4. Configurations for additional network types The specific configuration fields for additional networks is described in the following sections. 22.2.4.1. Configuration for a bridge additional network The following object describes the configuration parameters for the bridge CNI plugin: Table 22.2. Bridge CNI plugin JSON configuration object Field Type Description cniVersion string The CNI specification version. The 0.3.1 value is required. name string The value for the name parameter you provided previously for the CNO configuration. type string The name of the CNI plugin to configure: bridge . ipam object The configuration object for the IPAM CNI plugin. The plugin manages IP address assignment for the attachment definition. bridge string Optional: Specify the name of the virtual bridge to use. If the bridge interface does not exist on the host, it is created. The default value is cni0 . 
ipMasq boolean Optional: Set to true to enable IP masquerading for traffic that leaves the virtual network. The source IP address for all traffic is rewritten to the bridge's IP address. If the bridge does not have an IP address, this setting has no effect. The default value is false . isGateway boolean Optional: Set to true to assign an IP address to the bridge. The default value is false . isDefaultGateway boolean Optional: Set to true to configure the bridge as the default gateway for the virtual network. The default value is false . If isDefaultGateway is set to true , then isGateway is also set to true automatically. forceAddress boolean Optional: Set to true to allow assignment of a previously assigned IP address to the virtual bridge. When set to false , if an IPv4 address or an IPv6 address from overlapping subsets is assigned to the virtual bridge, an error occurs. The default value is false . hairpinMode boolean Optional: Set to true to allow the virtual bridge to send an Ethernet frame back through the virtual port it was received on. This mode is also known as reflective relay . The default value is false . promiscMode boolean Optional: Set to true to enable promiscuous mode on the bridge. The default value is false . vlan string Optional: Specify a virtual LAN (VLAN) tag as an integer value. By default, no VLAN tag is assigned. preserveDefaultVlan string Optional: Indicates whether the default vlan must be preserved on the veth end connected to the bridge. Defaults to true. vlanTrunk list Optional: Assign a VLAN trunk tag. The default value is none . mtu integer Optional: Set the maximum transmission unit (MTU) to the specified value. The default value is automatically set by the kernel. enabledad boolean Optional: Enables duplicate address detection for the container side veth . The default value is false . macspoofchk boolean Optional: Enables mac spoof check, limiting the traffic originating from the container to the mac address of the interface. The default value is false . Note The VLAN parameter configures the VLAN tag on the host end of the veth and also enables the vlan_filtering feature on the bridge interface. Note To configure uplink for a L2 network you need to allow the vlan on the uplink interface by using the following command: USD bridge vlan add vid VLAN_ID dev DEV 22.2.4.1.1. bridge configuration example The following example configures an additional network named bridge-net : { "cniVersion": "0.3.1", "name": "bridge-net", "type": "bridge", "isGateway": true, "vlan": 2, "ipam": { "type": "dhcp" } } 22.2.4.2. Configuration for a host device additional network Note Specify your network device by setting only one of the following parameters: device , hwaddr , kernelpath , or pciBusID . The following object describes the configuration parameters for the host-device CNI plugin: Table 22.3. Host device CNI plugin JSON configuration object Field Type Description cniVersion string The CNI specification version. The 0.3.1 value is required. name string The value for the name parameter you provided previously for the CNO configuration. type string The name of the CNI plugin to configure: host-device . device string Optional: The name of the device, such as eth0 . hwaddr string Optional: The device hardware MAC address. kernelpath string Optional: The Linux kernel device path, such as /sys/devices/pci0000:00/0000:00:1f.6 . pciBusID string Optional: The PCI address of the network device, such as 0000:00:1f.6 . 22.2.4.2.1. 
host-device configuration example The following example configures an additional network named hostdev-net : { "cniVersion": "0.3.1", "name": "hostdev-net", "type": "host-device", "device": "eth1" } 22.2.4.3. Configuration for a VLAN additional network The following object describes the configuration parameters for the VLAN, vlan , CNI plugin: Table 22.4. VLAN CNI plugin JSON configuration object Field Type Description cniVersion string The CNI specification version. The 0.3.1 value is required. name string The value for the name parameter you provided previously for the CNO configuration. type string The name of the CNI plugin to configure: vlan . master string The Ethernet interface to associate with the network attachment. If a master is not specified, the interface for the default network route is used. vlanId integer Set the ID of the vlan . ipam object The configuration object for the IPAM CNI plugin. The plugin manages IP address assignment for the attachment definition. mtu integer Optional: Set the maximum transmission unit (MTU) to the specified value. The default value is automatically set by the kernel. dns integer Optional: DNS information to return. For example, a priority-ordered list of DNS nameservers. linkInContainer boolean Optional: Specifies whether the master interface is in the container network namespace or the main network namespace. Set the value to true to request the use of a container namespace master interface. Important A NetworkAttachmentDefinition custom resource definition (CRD) with a vlan configuration can be used only on a single pod in a node because the CNI plugin cannot create multiple vlan subinterfaces with the same vlanId on the same master interface. 22.2.4.3.1. VLAN configuration example The following example demonstrates a vlan configuration with an additional network that is named vlan-net : { "name": "vlan-net", "cniVersion": "0.3.1", "type": "vlan", "master": "eth0", "mtu": 1500, "vlanId": 5, "linkInContainer": false, "ipam": { "type": "host-local", "subnet": "10.1.1.0/24" }, "dns": { "nameservers": [ "10.1.1.1", "8.8.8.8" ] } } 22.2.4.4. Configuration for an IPVLAN additional network The following object describes the configuration parameters for the IPVLAN, ipvlan , CNI plugin: Table 22.5. IPVLAN CNI plugin JSON configuration object Field Type Description cniVersion string The CNI specification version. The 0.3.1 value is required. name string The value for the name parameter you provided previously for the CNO configuration. type string The name of the CNI plugin to configure: ipvlan . ipam object The configuration object for the IPAM CNI plugin. The plugin manages IP address assignment for the attachment definition. This is required unless the plugin is chained. mode string Optional: The operating mode for the virtual network. The value must be l2 , l3 , or l3s . The default value is l2 . master string Optional: The Ethernet interface to associate with the network attachment. If a master is not specified, the interface for the default network route is used. mtu integer Optional: Set the maximum transmission unit (MTU) to the specified value. The default value is automatically set by the kernel. linkInContainer boolean Optional: Specifies whether the master interface is in the container network namespace or the main network namespace. Set the value to true to request the use of a container namespace master interface. Note The ipvlan object does not allow virtual interfaces to communicate with the master interface. 
Therefore the container will not be able to reach the host by using the ipvlan interface. Be sure that the container joins a network that provides connectivity to the host, such as a network supporting the Precision Time Protocol ( PTP ). A single master interface cannot simultaneously be configured to use both macvlan and ipvlan . For IP allocation schemes that cannot be interface agnostic, the ipvlan plugin can be chained with an earlier plugin that handles this logic. If the master is omitted, then the result must contain a single interface name for the ipvlan plugin to enslave. If ipam is omitted, then the result is used to configure the ipvlan interface. 22.2.4.4.1. ipvlan configuration example The following example configures an additional network named ipvlan-net : { "cniVersion": "0.3.1", "name": "ipvlan-net", "type": "ipvlan", "master": "eth1", "linkInContainer": false, "mode": "l3", "ipam": { "type": "static", "addresses": [ { "address": "192.168.10.10/24" } ] } } 22.2.4.5. Configuration for a MACVLAN additional network The following object describes the configuration parameters for the MAC Virtual LAN (MACVLAN) Container Network Interface (CNI) plugin: Table 22.6. MACVLAN CNI plugin JSON configuration object Field Type Description cniVersion string The CNI specification version. The 0.3.1 value is required. name string The value for the name parameter you provided previously for the CNO configuration. type string The name of the CNI plugin to configure: macvlan . ipam object The configuration object for the IPAM CNI plugin. The plugin manages IP address assignment for the attachment definition. mode string Optional: Configures traffic visibility on the virtual network. Must be either bridge , passthru , private , or vepa . If a value is not provided, the default value is bridge . master string Optional: The host network interface to associate with the newly created macvlan interface. If a value is not specified, then the default route interface is used. mtu integer Optional: The maximum transmission unit (MTU) to the specified value. The default value is automatically set by the kernel. linkInContainer boolean Optional: Specifies whether the master interface is in the container network namespace or the main network namespace. Set the value to true to request the use of a container namespace master interface. Note If you specify the master key for the plugin configuration, use a different physical network interface than the one that is associated with your primary network plugin to avoid possible conflicts. 22.2.4.5.1. MACVLAN configuration example The following example configures an additional network named macvlan-net : { "cniVersion": "0.3.1", "name": "macvlan-net", "type": "macvlan", "master": "eth1", "linkInContainer": false, "mode": "bridge", "ipam": { "type": "dhcp" } } 22.2.4.6. Configuration for a TAP additional network The following object describes the configuration parameters for the TAP CNI plugin: Table 22.7. TAP CNI plugin JSON configuration object Field Type Description cniVersion string The CNI specification version. The 0.3.1 value is required. name string The value for the name parameter you provided previously for the CNO configuration. type string The name of the CNI plugin to configure: tap . mac string Optional: Request the specified MAC address for the interface. mtu integer Optional: Set the maximum transmission unit (MTU) to the specified value. The default value is automatically set by the kernel. 
selinuxcontext string Optional: The SELinux context to associate with the tap device. Note The value system_u:system_r:container_t:s0 is required for OpenShift Container Platform. multiQueue boolean Optional: Set to true to enable multi-queue. owner integer Optional: The user owning the tap device. group integer Optional: The group owning the tap device. bridge string Optional: Set the tap device as a port of an already existing bridge. 22.2.4.6.1. Tap configuration example The following example configures an additional network named mynet : { "name": "mynet", "cniVersion": "0.3.1", "type": "tap", "mac": "00:11:22:33:44:55", "mtu": 1500, "selinuxcontext": "system_u:system_r:container_t:s0", "multiQueue": true, "owner": 0, "group": 0 "bridge": "br1" } 22.2.4.6.2. Setting SELinux boolean for the TAP CNI plugin To create the tap device with the container_t SELinux context, enable the container_use_devices boolean on the host by using the Machine Config Operator (MCO). Prerequisites You have installed the OpenShift CLI ( oc ). Procedure Create a new YAML file named, such as setsebool-container-use-devices.yaml , with the following details: apiVersion: machineconfiguration.openshift.io/v1 kind: MachineConfig metadata: labels: machineconfiguration.openshift.io/role: worker name: 99-worker-setsebool spec: config: ignition: version: 3.2.0 systemd: units: - enabled: true name: setsebool.service contents: | [Unit] Description=Set SELinux boolean for the TAP CNI plugin Before=kubelet.service [Service] Type=oneshot ExecStart=/usr/sbin/setsebool container_use_devices=on RemainAfterExit=true [Install] WantedBy=multi-user.target graphical.target Create the new MachineConfig object by running the following command: USD oc apply -f setsebool-container-use-devices.yaml Note Applying any changes to the MachineConfig object causes all affected nodes to gracefully reboot after the change is applied. This update can take some time to be applied. Verify the change is applied by running the following command: USD oc get machineconfigpools Expected output NAME CONFIG UPDATED UPDATING DEGRADED MACHINECOUNT READYMACHINECOUNT UPDATEDMACHINECOUNT DEGRADEDMACHINECOUNT AGE master rendered-master-e5e0c8e8be9194e7c5a882e047379cfa True False False 3 3 3 0 7d2h worker rendered-worker-d6c9ca107fba6cd76cdcbfcedcafa0f2 True False False 3 3 3 0 7d Note All nodes should be in the updated and ready state. Additional resources For more information about enabling an SELinux boolean on a node, see Setting SELinux booleans 22.2.4.7. Configuration for an OVN-Kubernetes additional network The Red Hat OpenShift Networking OVN-Kubernetes network plugin allows the configuration of secondary network interfaces for pods. To configure secondary network interfaces, you must define the configurations in the NetworkAttachmentDefinition custom resource definition (CRD). Note Pod and multi-network policy creation might remain in a pending state until the OVN-Kubernetes control plane agent in the nodes processes the associated network-attachment-definition CRD. You can configure an OVN-Kubernetes additional network in either layer 2 or localnet topologies. A layer 2 topology supports east-west cluster traffic, but does not allow access to the underlying physical network. A localnet topology allows connections to the physical network, but requires additional configuration of the underlying Open vSwitch (OVS) bridge on cluster nodes. 
The following sections provide example configurations for each of the topologies that OVN-Kubernetes currently allows for secondary networks. Note Networks names must be unique. For example, creating multiple NetworkAttachmentDefinition CRDs with different configurations that reference the same network is unsupported. 22.2.4.7.1. Supported platforms for OVN-Kubernetes additional network You can use an OVN-Kubernetes additional network with the following supported platforms: Bare metal IBM Power(R) IBM Z(R) IBM(R) LinuxONE VMware vSphere Red Hat OpenStack Platform (RHOSP) 22.2.4.7.2. OVN-Kubernetes network plugin JSON configuration table The following table describes the configuration parameters for the OVN-Kubernetes CNI network plugin: Table 22.8. OVN-Kubernetes network plugin JSON configuration table Field Type Description cniVersion string The CNI specification version. The required value is 0.3.1 . name string The name of the network. These networks are not namespaced. For example, you can have a network named l2-network referenced from two different NetworkAttachmentDefinition CRDs that exist on two different namespaces. This ensures that pods making use of the NetworkAttachmentDefinition CRD on their own different namespaces can communicate over the same secondary network. However, those two different NetworkAttachmentDefinition CRDs must also share the same network specific parameters such as topology , subnets , mtu , and excludeSubnets . type string The name of the CNI plugin to configure. This value must be set to ovn-k8s-cni-overlay . topology string The topological configuration for the network. Must be one of layer2 or localnet . subnets string The subnet to use for the network across the cluster. For "topology":"layer2" deployments, IPv6 ( 2001:DBB::/64 ) and dual-stack ( 192.168.100.0/24,2001:DBB::/64 ) subnets are supported. When omitted, the logical switch implementing the network only provides layer 2 communication, and users must configure IP addresses for the pods. Port security only prevents MAC spoofing. mtu string The maximum transmission unit (MTU). The default value, 1300 , is automatically set by the kernel. netAttachDefName string The metadata namespace and name of the network attachment definition CRD where this configuration is included. For example, if this configuration is defined in a NetworkAttachmentDefinition CRD in namespace ns1 named l2-network , this should be set to ns1/l2-network . excludeSubnets string A comma-separated list of CIDRs and IP addresses. IP addresses are removed from the assignable IP address pool and are never passed to the pods. vlanID integer If topology is set to localnet , the specified VLAN tag is assigned to traffic from this additional network. The default is to not assign a VLAN tag. 22.2.4.7.3. Compatibility with multi-network policy The multi-network policy API, which is provided by the MultiNetworkPolicy custom resource definition (CRD) in the k8s.cni.cncf.io API group, is compatible with an OVN-Kubernetes secondary network. When defining a network policy, the network policy rules that can be used depend on whether the OVN-Kubernetes secondary network defines the subnets field. Refer to the following table for details: Table 22.9. 
Supported multi-network policy selectors based on subnets CNI configuration subnets field specified Allowed multi-network policy selectors Yes podSelector and namespaceSelector ipBlock No ipBlock For example, the following multi-network policy is valid only if the subnets field is defined in the additional network CNI configuration for the additional network named blue2 : Example multi-network policy that uses a pod selector apiVersion: k8s.cni.cncf.io/v1beta1 kind: MultiNetworkPolicy metadata: name: allow-same-namespace annotations: k8s.v1.cni.cncf.io/policy-for: blue2 spec: podSelector: ingress: - from: - podSelector: {} The following example uses the ipBlock network policy selector, which is always valid for an OVN-Kubernetes additional network: Example multi-network policy that uses an IP block selector apiVersion: k8s.cni.cncf.io/v1beta1 kind: MultiNetworkPolicy metadata: name: ingress-ipblock annotations: k8s.v1.cni.cncf.io/policy-for: default/flatl2net spec: podSelector: matchLabels: name: access-control policyTypes: - Ingress ingress: - from: - ipBlock: cidr: 10.200.0.0/30 22.2.4.7.4. Configuration for a layer 2 switched topology The switched (layer 2) topology networks interconnect the workloads through a cluster-wide logical switch. This configuration can be used for IPv6 and dual-stack deployments. Note Layer 2 switched topology networks only allow for the transfer of data packets between pods within a cluster. The following JSON example configures a switched secondary network: { "cniVersion": "0.3.1", "name": "l2-network", "type": "ovn-k8s-cni-overlay", "topology":"layer2", "subnets": "10.100.200.0/24", "mtu": 1300, "netAttachDefName": "ns1/l2-network", "excludeSubnets": "10.100.200.0/29" } 22.2.4.7.5. Configuration for a localnet topology The switched localnet topology interconnects the workloads created as Network Attachment Definitions (NADs) through a cluster-wide logical switch to a physical network. 22.2.4.7.5.1. Prerequisites for configuring OVN-Kubernetes additional network The NMState Operator is installed. For more information, see About the Kubernetes NMState Operator . 22.2.4.7.5.2. Configuration for an OVN-Kubernetes additional network mapping You must map an additional network to the OVN bridge to use it as an OVN-Kubernetes additional network. Bridge mappings allow network traffic to reach the physical network. A bridge mapping associates a physical network name, also known as an interface label, to a bridge created with Open vSwitch (OVS). You can create an NodeNetworkConfigurationPolicy object, part of the nmstate.io/v1 API group, to declaratively create the mapping. This API is provided by the NMState Operator. By using this API you can apply the bridge mapping to nodes that match your specified nodeSelector expression, such as node-role.kubernetes.io/worker: '' . When attaching an additional network, you can either use the existing br-ex bridge or create a new bridge. Which approach to use depends on your specific network infrastructure. If your nodes include only a single network interface, you must use the existing bridge. This network interface is owned and managed by OVN-Kubernetes and you must not remove it from the br-ex bridge or alter the interface configuration. If you remove or alter the network interface, your cluster network will stop working correctly. If your nodes include several network interfaces, you can attach a different network interface to a new bridge, and use that for your additional network. 
This approach provides for traffic isolation from your primary cluster network. The localnet1 network is mapped to the br-ex bridge in the following example: Example mapping for sharing a bridge apiVersion: nmstate.io/v1 kind: NodeNetworkConfigurationPolicy metadata: name: mapping 1 spec: nodeSelector: node-role.kubernetes.io/worker: '' 2 desiredState: ovn: bridge-mappings: - localnet: localnet1 3 bridge: br-ex 4 state: present 5 1 The name for the configuration object. 2 A node selector that specifies the nodes to apply the node network configuration policy to. 3 The name for the additional network from which traffic is forwarded to the OVS bridge. This additional network must match the name of the spec.config.name field of the NetworkAttachmentDefinition CRD that defines the OVN-Kubernetes additional network. 4 The name of the OVS bridge on the node. This value is required only if you specify state: present . 5 The state for the mapping. Must be either present to add the bridge or absent to remove the bridge. The default value is present . In the following example, the localnet2 network interface is attached to the ovs-br1 bridge. Through this attachment, the network interface is available to the OVN-Kubernetes network plugin as an additional network. Example mapping for nodes with multiple interfaces apiVersion: nmstate.io/v1 kind: NodeNetworkConfigurationPolicy metadata: name: ovs-br1-multiple-networks 1 spec: nodeSelector: node-role.kubernetes.io/worker: '' 2 desiredState: interfaces: - name: ovs-br1 3 description: |- A dedicated OVS bridge with eth1 as a port allowing all VLANs and untagged traffic type: ovs-bridge state: up bridge: allow-extra-patch-ports: true options: stp: false port: - name: eth1 4 ovn: bridge-mappings: - localnet: localnet2 5 bridge: ovs-br1 6 state: present 7 1 The name for the configuration object. 2 A node selector that specifies the nodes to apply the node network configuration policy to. 3 A new OVS bridge, separate from the default bridge used by OVN-Kubernetes for all cluster traffic. 4 A network device on the host system to associate with this new OVS bridge. 5 The name for the additional network from which traffic is forwarded to the OVS bridge. This additional network must match the name of the spec.config.name field of the NetworkAttachmentDefinition CRD that defines the OVN-Kubernetes additional network. 6 The name of the OVS bridge on the node. This value is required only if you specify state: present . 7 The state for the mapping. Must be either present to add the bridge or absent to remove the bridge. The default value is present . This declarative approach is recommended because the NMState Operator applies additional network configuration to all nodes specified by the node selector automatically and transparently. The following JSON example configures a localnet secondary network: { "cniVersion": "0.3.1", "name": "ns1-localnet-network", "type": "ovn-k8s-cni-overlay", "topology":"localnet", "subnets": "202.10.130.112/28", "vlanID": 33, "mtu": 1500, "netAttachDefName": "ns1/localnet-network", "excludeSubnets": "10.100.200.0/29" } 22.2.4.7.6. Configuring pods for additional networks You must specify the secondary network attachments through the k8s.v1.cni.cncf.io/networks annotation. The following example provisions a pod with two secondary attachments, one for each of the attachment configurations presented in this guide.
apiVersion: v1 kind: Pod metadata: annotations: k8s.v1.cni.cncf.io/networks: l2-network name: tinypod namespace: ns1 spec: containers: - args: - pause image: k8s.gcr.io/e2e-test-images/agnhost:2.36 imagePullPolicy: IfNotPresent name: agnhost-container 22.2.4.7.7. Configuring pods with a static IP address The following example provisions a pod with a static IP address. Note You can specify the IP address for a pod's secondary network attachment only for layer 2 attachments. Specifying a static IP address for the pod is only possible when the attachment configuration does not feature subnets. apiVersion: v1 kind: Pod metadata: annotations: k8s.v1.cni.cncf.io/networks: '[ { "name": "l2-network", 1 "mac": "02:03:04:05:06:07", 2 "interface": "myiface1", 3 "ips": [ "192.0.2.20/24" ] 4 } ]' name: tinypod namespace: ns1 spec: containers: - args: - pause image: k8s.gcr.io/e2e-test-images/agnhost:2.36 imagePullPolicy: IfNotPresent name: agnhost-container 1 The name of the network. This value must be unique across all NetworkAttachmentDefinition CRDs. 2 The MAC address to be assigned for the interface. 3 The name of the network interface to be created for the pod. 4 The IP addresses to be assigned to the network interface. 22.2.5. Configuration of IP address assignment for an additional network The IP address management (IPAM) Container Network Interface (CNI) plugin provides IP addresses for other CNI plugins. You can use the following IP address assignment types: Static assignment. Dynamic assignment through a DHCP server. The DHCP server you specify must be reachable from the additional network. Dynamic assignment through the Whereabouts IPAM CNI plugin. 22.2.5.1. Static IP address assignment configuration The following table describes the configuration for static IP address assignment: Table 22.10. ipam static configuration object Field Type Description type string The IPAM address type. The value static is required. addresses array An array of objects specifying IP addresses to assign to the virtual interface. Both IPv4 and IPv6 IP addresses are supported. routes array An array of objects specifying routes to configure inside the pod. dns array Optional: An array of objects specifying the DNS configuration. The addresses array requires objects with the following fields: Table 22.11. ipam.addresses[] array Field Type Description address string An IP address and network prefix that you specify. For example, if you specify 10.10.21.10/24 , then the additional network is assigned an IP address of 10.10.21.10 and the netmask is 255.255.255.0 . gateway string The default gateway to route egress network traffic to. Table 22.12. ipam.routes[] array Field Type Description dst string The IP address range in CIDR format, such as 192.168.17.0/24 or 0.0.0.0/0 for the default route. gw string The gateway where network traffic is routed. Table 22.13. ipam.dns object Field Type Description nameservers array An array of one or more IP addresses to send DNS queries to. domain array The default domain to append to a hostname. For example, if the domain is set to example.com , a DNS lookup query for example-host is rewritten as example-host.example.com . search array An array of domain names to append to an unqualified hostname, such as example-host , during a DNS lookup query. Static IP address assignment configuration example { "ipam": { "type": "static", "addresses": [ { "address": "191.168.1.7/24" } ] } } 22.2.5.2.
Dynamic IP address (DHCP) assignment configuration The following JSON describes the configuration for dynamic IP address assignment with DHCP. Renewal of DHCP leases A pod obtains its original DHCP lease when it is created. The lease must be periodically renewed by a minimal DHCP server deployment running on the cluster. To trigger the deployment of the DHCP server, you must create a shim network attachment by editing the Cluster Network Operator configuration, as in the following example: Example shim network attachment definition apiVersion: operator.openshift.io/v1 kind: Network metadata: name: cluster spec: additionalNetworks: - name: dhcp-shim namespace: default type: Raw rawCNIConfig: |- { "name": "dhcp-shim", "cniVersion": "0.3.1", "type": "bridge", "ipam": { "type": "dhcp" } } # ... Table 22.14. ipam DHCP configuration object Field Type Description type string The IPAM address type. The value dhcp is required. Dynamic IP address (DHCP) assignment configuration example { "ipam": { "type": "dhcp" } } 22.2.5.3. Dynamic IP address assignment configuration with Whereabouts The Whereabouts CNI plugin allows the dynamic assignment of an IP address to an additional network without the use of a DHCP server. The following table describes the configuration for dynamic IP address assignment with Whereabouts: Table 22.15. ipam whereabouts configuration object Field Type Description type string The IPAM address type. The value whereabouts is required. range string An IP address and range in CIDR notation. IP addresses are assigned from within this range of addresses. exclude array Optional: A list of zero or more IP addresses and ranges in CIDR notation. IP addresses within an excluded address range are not assigned. Dynamic IP address assignment configuration example that uses Whereabouts { "ipam": { "type": "whereabouts", "range": "192.0.2.192/27", "exclude": [ "192.0.2.192/30", "192.0.2.196/32" ] } } 22.2.5.4. Creating a whereabouts-reconciler daemon set The Whereabouts reconciler is responsible for managing dynamic IP address assignments for the pods within a cluster by using the Whereabouts IP Address Management (IPAM) solution. It ensures that each pod gets a unique IP address from the specified IP address range. It also handles IP address releases when pods are deleted or scaled down. Note You can also use a NetworkAttachmentDefinition custom resource definition (CRD) for dynamic IP address assignment. The whereabouts-reconciler daemon set is automatically created when you configure an additional network through the Cluster Network Operator. It is not automatically created when you configure an additional network from a YAML manifest. To trigger the deployment of the whereabouts-reconciler daemon set, you must manually create a whereabouts-shim network attachment by editing the Cluster Network Operator custom resource (CR) file. Use the following procedure to deploy the whereabouts-reconciler daemon set. Procedure Edit the Network.operator.openshift.io custom resource (CR) by running the following command: USD oc edit network.operator.openshift.io cluster Include the additionalNetworks section shown in this example YAML extract within the spec definition of the custom resource (CR): apiVersion: operator.openshift.io/v1 kind: Network metadata: name: cluster # ... spec: additionalNetworks: - name: whereabouts-shim namespace: default rawCNIConfig: |- { "name": "whereabouts-shim", "cniVersion": "0.3.1", "type": "bridge", "ipam": { "type": "whereabouts" } } type: Raw # ...
Save the file and exit the text editor. Verify that the whereabouts-reconciler daemon set deployed successfully by running the following command: USD oc get all -n openshift-multus | grep whereabouts-reconciler Example output pod/whereabouts-reconciler-jnp6g 1/1 Running 0 6s pod/whereabouts-reconciler-k76gg 1/1 Running 0 6s pod/whereabouts-reconciler-k86t9 1/1 Running 0 6s pod/whereabouts-reconciler-p4sxw 1/1 Running 0 6s pod/whereabouts-reconciler-rvfdv 1/1 Running 0 6s pod/whereabouts-reconciler-svzw9 1/1 Running 0 6s daemonset.apps/whereabouts-reconciler 6 6 6 6 6 kubernetes.io/os=linux 6s 22.2.5.5. Configuring the Whereabouts IP reconciler schedule The Whereabouts IPAM CNI plugin runs the IP reconciler daily. This process cleans up any stranded IP allocations that might result in exhausting IPs and therefore prevent new pods from getting an IP allocated to them. Use this procedure to change the frequency at which the IP reconciler runs. Prerequisites You installed the OpenShift CLI ( oc ). You have access to the cluster as a user with the cluster-admin role. You have deployed the whereabouts-reconciler daemon set, and the whereabouts-reconciler pods are up and running. Procedure Run the following command to create a ConfigMap object named whereabouts-config in the openshift-multus namespace with a specific cron expression for the IP reconciler: USD oc create configmap whereabouts-config -n openshift-multus --from-literal=reconciler_cron_expression="*/15 * * * *" This cron expression indicates the IP reconciler runs every 15 minutes. Adjust the expression based on your specific requirements. Note The whereabouts-reconciler daemon set can only consume a cron expression pattern that includes five asterisks. The sixth, which is used to denote seconds, is currently not supported. Retrieve information about resources related to the whereabouts-reconciler daemon set and pods within the openshift-multus namespace by running the following command: USD oc get all -n openshift-multus | grep whereabouts-reconciler Example output pod/whereabouts-reconciler-2p7hw 1/1 Running 0 4m14s pod/whereabouts-reconciler-76jk7 1/1 Running 0 4m14s pod/whereabouts-reconciler-94zw6 1/1 Running 0 4m14s pod/whereabouts-reconciler-mfh68 1/1 Running 0 4m14s pod/whereabouts-reconciler-pgshz 1/1 Running 0 4m14s pod/whereabouts-reconciler-xn5xz 1/1 Running 0 4m14s daemonset.apps/whereabouts-reconciler 6 6 6 6 6 kubernetes.io/os=linux 4m16s Run the following command to verify that the whereabouts-reconciler pod runs the IP reconciler with the configured interval: USD oc -n openshift-multus logs whereabouts-reconciler-2p7hw Example output 2024-02-02T16:33:54Z [debug] event not relevant: "/cron-schedule/..2024_02_02_16_33_54.1375928161": CREATE 2024-02-02T16:33:54Z [debug] event not relevant: "/cron-schedule/..2024_02_02_16_33_54.1375928161": CHMOD 2024-02-02T16:33:54Z [debug] event not relevant: "/cron-schedule/..data_tmp": RENAME 2024-02-02T16:33:54Z [verbose] using expression: */15 * * * * 2024-02-02T16:33:54Z [verbose] configuration updated to file "/cron-schedule/..data". 
New cron expression: */15 * * * * 2024-02-02T16:33:54Z [verbose] successfully updated CRON configuration id "00c2d1c9-631d-403f-bb86-73ad104a6817" - new cron expression: */15 * * * * 2024-02-02T16:33:54Z [debug] event not relevant: "/cron-schedule/config": CREATE 2024-02-02T16:33:54Z [debug] event not relevant: "/cron-schedule/..2024_02_02_16_26_17.3874177937": REMOVE 2024-02-02T16:45:00Z [verbose] starting reconciler run 2024-02-02T16:45:00Z [debug] NewReconcileLooper - inferred connection data 2024-02-02T16:45:00Z [debug] listing IP pools 2024-02-02T16:45:00Z [debug] no IP addresses to cleanup 2024-02-02T16:45:00Z [verbose] reconciler success 22.2.5.6. Creating a configuration for assignment of dual-stack IP addresses dynamically Dual-stack IP address assignment can be configured with the ipRanges parameter for: IPv4 addresses IPv6 addresses multiple IP address assignment Procedure Set type to whereabouts . Use ipRanges to allocate IP addresses as shown in the following example: apiVersion: operator.openshift.io/v1 kind: Network metadata: name: cluster spec: additionalNetworks: - name: whereabouts-shim namespace: default type: Raw rawCNIConfig: |- { "name": "whereabouts-dual-stack", "cniVersion": "0.3.1", "type": "bridge", "ipam": { "type": "whereabouts", "ipRanges": [ {"range": "192.168.10.0/24"}, {"range": "2001:db8::/64"} ] } } Attach the network to a pod. For more information, see "Adding a pod to an additional network". Verify that all IP addresses are assigned by running the following command: USD oc exec -it mypod -- ip a Additional resources Attaching a pod to an additional network 22.2.6. Creating an additional network attachment with the Cluster Network Operator The Cluster Network Operator (CNO) manages additional network definitions. When you specify an additional network to create, the CNO creates the NetworkAttachmentDefinition CRD automatically. Important Do not edit the NetworkAttachmentDefinition CRDs that the Cluster Network Operator manages. Doing so might disrupt network traffic on your additional network. Prerequisites Install the OpenShift CLI ( oc ). Log in as a user with cluster-admin privileges. Procedure Optional: Create the namespace for the additional networks: USD oc create namespace <namespace_name> To edit the CNO configuration, enter the following command: USD oc edit networks.operator.openshift.io cluster Modify the CR that you are creating by adding the configuration for the additional network that you are creating, as in the following example CR. apiVersion: operator.openshift.io/v1 kind: Network metadata: name: cluster spec: # ... additionalNetworks: - name: tertiary-net namespace: namespace2 type: Raw rawCNIConfig: |- { "cniVersion": "0.3.1", "name": "tertiary-net", "type": "ipvlan", "master": "eth1", "mode": "l2", "ipam": { "type": "static", "addresses": [ { "address": "192.168.1.23/24" } ] } } Save your changes and quit the text editor to commit your changes. Verification Confirm that the CNO created the NetworkAttachmentDefinition CRD by running the following command. There might be a delay before the CNO creates the CRD. USD oc get network-attachment-definitions -n <namespace> where: <namespace> Specifies the namespace for the network attachment that you added to the CNO configuration. Example output NAME AGE test-network-1 14m 22.2.7. Creating an additional network attachment by applying a YAML manifest Prerequisites Install the OpenShift CLI ( oc ). Log in as a user with cluster-admin privileges.
Procedure Create a YAML file with your additional network configuration, such as in the following example: apiVersion: k8s.cni.cncf.io/v1 kind: NetworkAttachmentDefinition metadata: name: next-net spec: config: |- { "cniVersion": "0.3.1", "name": "work-network", "type": "host-device", "device": "eth1", "ipam": { "type": "dhcp" } } To create the additional network, enter the following command: USD oc apply -f <file>.yaml where: <file> Specifies the name of the file containing the YAML manifest. 22.2.8. About configuring the master interface in the container network namespace You can create a MAC-VLAN, an IP-VLAN, or a VLAN subinterface that is based on a master interface that exists in a container namespace. You can also create a master interface as part of the pod network configuration in a separate network attachment definition CRD. To use a container namespace master interface, you must specify true for the linkInContainer parameter that exists in the subinterface configuration of the NetworkAttachmentDefinition CRD. 22.2.8.1. Creating multiple VLANs on SR-IOV VFs An example use case for utilizing this feature is to create multiple VLANs based on SR-IOV VFs. To do so, begin by creating an SR-IOV network and then define the network attachments for the VLAN interfaces. The following example shows how to configure the setup illustrated in this diagram. Figure 22.1. Creating VLANs Prerequisites You installed the OpenShift CLI ( oc ). You have access to the cluster as a user with the cluster-admin role. You have installed the SR-IOV Network Operator. Procedure Create a dedicated container namespace where you want to deploy your pod by using the following command: USD oc new-project test-namespace Create an SR-IOV node policy: Create an SriovNetworkNodePolicy object, and then save the YAML in the sriov-node-network-policy.yaml file: apiVersion: sriovnetwork.openshift.io/v1 kind: SriovNetworkNodePolicy metadata: name: sriovnic namespace: openshift-sriov-network-operator spec: deviceType: netdevice isRdma: false needVhostNet: true nicSelector: vendor: "15b3" 1 deviceID: "101b" 2 rootDevices: ["00:05.0"] numVfs: 10 priority: 99 resourceName: sriovnic nodeSelector: feature.node.kubernetes.io/network-sriov.capable: "true" Note The SR-IOV network node policy configuration example, with the setting deviceType: netdevice , is tailored specifically for Mellanox Network Interface Cards (NICs). 1 The vendor hexadecimal code of the SR-IOV network device. The value 15b3 is associated with a Mellanox NIC. 2 The device hexadecimal code of the SR-IOV network device. Apply the YAML by running the following command: USD oc apply -f sriov-node-network-policy.yaml Note Applying this might take some time due to the node requiring a reboot. Create an SR-IOV network: Create the SriovNetwork custom resource (CR) for the additional SR-IOV network attachment as in the following example CR.
Save the YAML as the file sriov-network-attachment.yaml : apiVersion: sriovnetwork.openshift.io/v1 kind: SriovNetwork metadata: name: sriov-network namespace: openshift-sriov-network-operator spec: networkNamespace: test-namespace resourceName: sriovnic spoofChk: "off" trust: "on" Apply the YAML by running the following command: USD oc apply -f sriov-network-attachment.yaml Create the VLAN additional network: Using the following YAML example, create a file named vlan100-additional-network-configuration.yaml : apiVersion: k8s.cni.cncf.io/v1 kind: NetworkAttachmentDefinition metadata: name: vlan-100 namespace: test-namespace spec: config: | { "cniVersion": "0.4.0", "name": "vlan-100", "plugins": [ { "type": "vlan", "master": "ext0", 1 "mtu": 1500, "vlanId": 100, "linkInContainer": true, 2 "ipam": {"type": "whereabouts", "ipRanges": [{"range": "1.1.1.0/24"}]} } ] } 1 The VLAN configuration needs to specify the master name. This can be configured in the pod networks annotation. 2 The linkInContainer parameter must be specified. Apply the YAML file by running the following command: USD oc apply -f vlan100-additional-network-configuration.yaml Create a pod definition by using the earlier specified networks: Using the following YAML example, create a file named pod-a.yaml file: Note The manifest below includes 2 resources: Namespace with security labels Pod definition with appropriate network annotation apiVersion: v1 kind: Namespace metadata: name: test-namespace labels: pod-security.kubernetes.io/enforce: privileged pod-security.kubernetes.io/audit: privileged pod-security.kubernetes.io/warn: privileged security.openshift.io/scc.podSecurityLabelSync: "false" --- apiVersion: v1 kind: Pod metadata: name: nginx-pod namespace: test-namespace annotations: k8s.v1.cni.cncf.io/networks: '[ { "name": "sriov-network", "namespace": "test-namespace", "interface": "ext0" 1 }, { "name": "vlan-100", "namespace": "test-namespace", "interface": "ext0.100" } ]' spec: securityContext: runAsNonRoot: true containers: - name: nginx-container image: nginxinc/nginx-unprivileged:latest securityContext: allowPrivilegeEscalation: false capabilities: drop: ["ALL"] ports: - containerPort: 80 seccompProfile: type: "RuntimeDefault" 1 The name to be used as the master for the VLAN interface. Apply the YAML file by running the following command: USD oc apply -f pod-a.yaml Get detailed information about the nginx-pod within the test-namespace by running the following command: USD oc describe pods nginx-pod -n test-namespace Example output Name: nginx-pod Namespace: test-namespace Priority: 0 Node: worker-1/10.46.186.105 Start Time: Mon, 14 Aug 2023 16:23:13 -0400 Labels: <none> Annotations: k8s.ovn.org/pod-networks: {"default":{"ip_addresses":["10.131.0.26/23"],"mac_address":"0a:58:0a:83:00:1a","gateway_ips":["10.131.0.1"],"routes":[{"dest":"10.128.0.0... k8s.v1.cni.cncf.io/network-status: [{ "name": "ovn-kubernetes", "interface": "eth0", "ips": [ "10.131.0.26" ], "mac": "0a:58:0a:83:00:1a", "default": true, "dns": {} },{ "name": "test-namespace/sriov-network", "interface": "ext0", "mac": "6e:a7:5e:3f:49:1b", "dns": {}, "device-info": { "type": "pci", "version": "1.0.0", "pci": { "pci-address": "0000:d8:00.2" } } },{ "name": "test-namespace/vlan-100", "interface": "ext0.100", "ips": [ "1.1.1.1" ], "mac": "6e:a7:5e:3f:49:1b", "dns": {} }] k8s.v1.cni.cncf.io/networks: [ { "name": "sriov-network", "namespace": "test-namespace", "interface": "ext0" }, { "name": "vlan-100", "namespace": "test-namespace", "i... 
openshift.io/scc: privileged Status: Running IP: 10.131.0.26 IPs: IP: 10.131.0.26 22.2.8.2. Creating a subinterface based on a bridge master interface in a container namespace You can create a subinterface based on a bridge master interface that exists in a container namespace. Creating a subinterface can be applied to other types of interfaces. Prerequisites You have installed the OpenShift CLI ( oc ). You are logged in to the OpenShift Container Platform cluster as a user with cluster-admin privileges. Procedure Create a dedicated container namespace where you want to deploy your pod by entering the following command: USD oc new-project test-namespace Using the following YAML example, create a bridge NetworkAttachmentDefinition custom resource definition (CRD) file named bridge-nad.yaml : apiVersion: "k8s.cni.cncf.io/v1" kind: NetworkAttachmentDefinition metadata: name: bridge-network spec: config: '{ "cniVersion": "0.4.0", "name": "bridge-network", "type": "bridge", "bridge": "br-001", "isGateway": true, "ipMasq": true, "hairpinMode": true, "ipam": { "type": "host-local", "subnet": "10.0.0.0/24", "routes": [{"dst": "0.0.0.0/0"}] } }' Run the following command to apply the NetworkAttachmentDefinition CRD to your OpenShift Container Platform cluster: USD oc apply -f bridge-nad.yaml Verify that you successfully created a NetworkAttachmentDefinition CRD by entering the following command: USD oc get network-attachment-definitions Example output NAME AGE bridge-network 15s Using the following YAML example, create a file named ipvlan-additional-network-configuration.yaml for the IPVLAN additional network configuration: apiVersion: k8s.cni.cncf.io/v1 kind: NetworkAttachmentDefinition metadata: name: ipvlan-net namespace: test-namespace spec: config: '{ "cniVersion": "0.3.1", "name": "ipvlan-net", "type": "ipvlan", "master": "ext0", 1 "mode": "l3", "linkInContainer": true, 2 "ipam": {"type": "whereabouts", "ipRanges": [{"range": "10.0.0.0/24"}]} }' 1 Specifies the ethernet interface to associate with the network attachment. This is subsequently configured in the pod networks annotation. 2 Specifies that the master interface is in the container network namespace. Apply the YAML file by running the following command: USD oc apply -f ipvlan-additional-network-configuration.yaml Verify that the NetworkAttachmentDefinition CRD has been created successfully by running the following command: USD oc get network-attachment-definitions Example output NAME AGE bridge-network 87s ipvlan-net 9s Using the following YAML example, create a file named pod-a.yaml for the pod definition: apiVersion: v1 kind: Pod metadata: name: pod-a namespace: test-namespace annotations: k8s.v1.cni.cncf.io/networks: '[ { "name": "bridge-network", "interface": "ext0" 1 }, { "name": "ipvlan-net", "interface": "ext1" } ]' spec: securityContext: runAsNonRoot: true seccompProfile: type: RuntimeDefault containers: - name: test-pod image: quay.io/openshifttest/hello-sdn@sha256:c89445416459e7adea9a5a416b3365ed3d74f2491beb904d61dc8d1eb89a72a4 securityContext: allowPrivilegeEscalation: false capabilities: drop: [ALL] 1 Specifies the name to be used as the master for the IPVLAN interface. 
Apply the YAML file by running the following command: USD oc apply -f pod-a.yaml Verify that the pod is running by using the following command: USD oc get pod -n test-namespace Example output NAME READY STATUS RESTARTS AGE pod-a 1/1 Running 0 2m36s Show network interface information about the pod-a resource within the test-namespace by running the following command: USD oc exec -n test-namespace pod-a -- ip a Example output 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo valid_lft forever preferred_lft forever inet6 ::1/128 scope host valid_lft forever preferred_lft forever 3: eth0@if105: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1400 qdisc noqueue state UP group default link/ether 0a:58:0a:d9:00:5d brd ff:ff:ff:ff:ff:ff link-netnsid 0 inet 10.217.0.93/23 brd 10.217.1.255 scope global eth0 valid_lft forever preferred_lft forever inet6 fe80::488b:91ff:fe84:a94b/64 scope link valid_lft forever preferred_lft forever 4: ext0@if107: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default link/ether be:da:bd:7e:f4:37 brd ff:ff:ff:ff:ff:ff link-netnsid 0 inet 10.0.0.2/24 brd 10.0.0.255 scope global ext0 valid_lft forever preferred_lft forever inet6 fe80::bcda:bdff:fe7e:f437/64 scope link valid_lft forever preferred_lft forever 5: ext1@ext0: <BROADCAST,MULTICAST,NOARP,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN group default link/ether be:da:bd:7e:f4:37 brd ff:ff:ff:ff:ff:ff inet 10.0.0.1/24 brd 10.0.0.255 scope global ext1 valid_lft forever preferred_lft forever inet6 fe80::beda:bd00:17e:f437/64 scope link valid_lft forever preferred_lft forever This output shows that the network interface ext1 is associated with the physical interface ext0 . 22.3. About virtual routing and forwarding 22.3.1. About virtual routing and forwarding Virtual routing and forwarding (VRF) devices combined with IP rules provide the ability to create virtual routing and forwarding domains. VRF reduces the number of permissions needed by CNF, and provides increased visibility of the network topology of secondary networks. VRF is used to provide multi-tenancy functionality, for example, where each tenant has its own unique routing tables and requires different default gateways. Processes can bind a socket to the VRF device. Packets sent through the bound socket use the routing table associated with the VRF device. An important feature of VRF is that it impacts only OSI model layer 3 traffic and above, so L2 tools, such as LLDP, are not affected. This allows higher priority IP rules, such as policy-based routing, to take precedence over the VRF device rules directing specific traffic. 22.3.1.1. Benefits of secondary networks for pods for telecommunications operators In telecommunications use cases, each CNF can potentially be connected to multiple different networks sharing the same address space. These secondary networks can potentially conflict with the cluster's main network CIDR. Using the CNI VRF plugin, network functions can be connected to different customers' infrastructure using the same IP address, keeping different customers isolated. These IP addresses can overlap with the OpenShift Container Platform IP space. The CNI VRF plugin also reduces the number of permissions needed by CNF and increases the visibility of network topologies of secondary networks. 22.4.
Configuring multi-network policy As a cluster administrator, you can configure multi-network policies for additional networks. You can specify multi-network policy for SR-IOV, macvlan, and OVN-Kubernetes additional networks. Macvlan additional networks are fully supported. Other types of additional networks, such as ipvlan, are not supported. Important Support for configuring multi-network policies for SR-IOV additional networks is a Technology Preview feature and is only supported with kernel network interface cards (NICs). SR-IOV is not supported for Data Plane Development Kit (DPDK) applications. For more information about the support scope of Red Hat Technology Preview features, see Technology Preview Features Support Scope . Note Configured network policies are ignored in IPv6 networks. 22.4.1. Differences between multi-network policy and network policy Although the MultiNetworkPolicy API implements the NetworkPolicy API, there are several important differences: You must use the MultiNetworkPolicy API: apiVersion: k8s.cni.cncf.io/v1beta1 kind: MultiNetworkPolicy You must use the multi-networkpolicy resource name when using the CLI to interact with multi-network policies. For example, you can view a multi-network policy object with the oc get multi-networkpolicy <name> command, where <name> is the name of a multi-network policy. You must specify an annotation with the name of the network attachment definition that defines the macvlan or SR-IOV additional network: apiVersion: k8s.cni.cncf.io/v1beta1 kind: MultiNetworkPolicy metadata: annotations: k8s.v1.cni.cncf.io/policy-for: <network_name> where: <network_name> Specifies the name of a network attachment definition. 22.4.2. Enabling multi-network policy for the cluster As a cluster administrator, you can enable multi-network policy support on your cluster. Prerequisites Install the OpenShift CLI ( oc ). Log in to the cluster with a user with cluster-admin privileges. Procedure Create the multinetwork-enable-patch.yaml file with the following YAML: apiVersion: operator.openshift.io/v1 kind: Network metadata: name: cluster spec: useMultiNetworkPolicy: true Configure the cluster to enable multi-network policy: USD oc patch network.operator.openshift.io cluster --type=merge --patch-file=multinetwork-enable-patch.yaml Example output network.operator.openshift.io/cluster patched 22.4.3. Working with multi-network policy As a cluster administrator, you can create, edit, view, and delete multi-network policies. 22.4.3.1. Prerequisites You have enabled multi-network policy support for your cluster. 22.4.3.2. Creating a multi-network policy using the CLI To define granular rules describing ingress or egress network traffic allowed for namespaces in your cluster, you can create a multi-network policy. Prerequisites Your cluster uses a network plugin that supports NetworkPolicy objects, such as the OVN-Kubernetes network plugin or the OpenShift SDN network plugin with mode: NetworkPolicy set. This mode is the default for OpenShift SDN. You installed the OpenShift CLI ( oc ). You are logged in to the cluster with a user with cluster-admin privileges. You are working in the namespace that the multi-network policy applies to. Procedure Create a policy rule: Create a <policy_name>.yaml file: USD touch <policy_name>.yaml where: <policy_name> Specifies the multi-network policy file name.
Define a multi-network policy in the file that you just created, such as in the following examples: Deny ingress from all pods in all namespaces This is a fundamental policy, blocking all cross-pod networking other than cross-pod traffic allowed by the configuration of other Network Policies. apiVersion: k8s.cni.cncf.io/v1beta1 kind: MultiNetworkPolicy metadata: name: deny-by-default annotations: k8s.v1.cni.cncf.io/policy-for: <namespace_name>/<network_name> spec: podSelector: {} policyTypes: - Ingress ingress: [] where: <network_name> Specifies the name of a network attachment definition. Allow ingress from all pods in the same namespace apiVersion: k8s.cni.cncf.io/v1beta1 kind: MultiNetworkPolicy metadata: name: allow-same-namespace annotations: k8s.v1.cni.cncf.io/policy-for: <network_name> spec: podSelector: ingress: - from: - podSelector: {} where: <network_name> Specifies the name of a network attachment definition. Allow ingress traffic to one pod from a particular namespace This policy allows traffic to pods labeled pod-a from pods running in namespace-y . apiVersion: k8s.cni.cncf.io/v1beta1 kind: MultiNetworkPolicy metadata: name: allow-traffic-pod annotations: k8s.v1.cni.cncf.io/policy-for: <network_name> spec: podSelector: matchLabels: pod: pod-a policyTypes: - Ingress ingress: - from: - namespaceSelector: matchLabels: kubernetes.io/metadata.name: namespace-y where: <network_name> Specifies the name of a network attachment definition. Restrict traffic to a service This policy, when applied, ensures that every pod with both labels app=bookstore and role=api can only be accessed by pods with the label app=bookstore . In this example, the application could be a REST API server, marked with labels app=bookstore and role=api . This example addresses the following use cases: Restricting the traffic to a service to only the other microservices that need to use it. Restricting the connections to a database to only permit the application using it. apiVersion: k8s.cni.cncf.io/v1beta1 kind: MultiNetworkPolicy metadata: name: api-allow annotations: k8s.v1.cni.cncf.io/policy-for: <network_name> spec: podSelector: matchLabels: app: bookstore role: api ingress: - from: - podSelector: matchLabels: app: bookstore where: <network_name> Specifies the name of a network attachment definition. To create the multi-network policy object, enter the following command: USD oc apply -f <policy_name>.yaml -n <namespace> where: <policy_name> Specifies the multi-network policy file name. <namespace> Optional: Specifies the namespace if the object is defined in a different namespace than the current namespace. Example output multinetworkpolicy.k8s.cni.cncf.io/deny-by-default created Note If you log in to the web console with cluster-admin privileges, you have a choice of creating a network policy in any namespace in the cluster directly in YAML or from a form in the web console. 22.4.3.3. Editing a multi-network policy You can edit a multi-network policy in a namespace. Prerequisites Your cluster uses a network plugin that supports NetworkPolicy objects, such as the OVN-Kubernetes network plugin or the OpenShift SDN network plugin with mode: NetworkPolicy set. This mode is the default for OpenShift SDN. You installed the OpenShift CLI ( oc ). You are logged in to the cluster with a user with cluster-admin privileges. You are working in the namespace where the multi-network policy exists.
Procedure Optional: To list the multi-network policy objects in a namespace, enter the following command: USD oc get multi-networkpolicy where: <namespace> Optional: Specifies the namespace if the object is defined in a different namespace than the current namespace. Edit the multi-network policy object. If you saved the multi-network policy definition in a file, edit the file and make any necessary changes, and then enter the following command. USD oc apply -n <namespace> -f <policy_file>.yaml where: <namespace> Optional: Specifies the namespace if the object is defined in a different namespace than the current namespace. <policy_file> Specifies the name of the file containing the network policy. If you need to update the multi-network policy object directly, enter the following command: USD oc edit multi-networkpolicy <policy_name> -n <namespace> where: <policy_name> Specifies the name of the network policy. <namespace> Optional: Specifies the namespace if the object is defined in a different namespace than the current namespace. Confirm that the multi-network policy object is updated. USD oc describe multi-networkpolicy <policy_name> -n <namespace> where: <policy_name> Specifies the name of the multi-network policy. <namespace> Optional: Specifies the namespace if the object is defined in a different namespace than the current namespace. Note If you log in to the web console with cluster-admin privileges, you have a choice of editing a network policy in any namespace in the cluster directly in YAML or from the policy in the web console through the Actions menu. 22.4.3.4. Viewing multi-network policies using the CLI You can examine the multi-network policies in a namespace. Prerequisites You installed the OpenShift CLI ( oc ). You are logged in to the cluster with a user with cluster-admin privileges. You are working in the namespace where the multi-network policy exists. Procedure List multi-network policies in a namespace: To view multi-network policy objects defined in a namespace, enter the following command: USD oc get multi-networkpolicy Optional: To examine a specific multi-network policy, enter the following command: USD oc describe multi-networkpolicy <policy_name> -n <namespace> where: <policy_name> Specifies the name of the multi-network policy to inspect. <namespace> Optional: Specifies the namespace if the object is defined in a different namespace than the current namespace. Note If you log in to the web console with cluster-admin privileges, you have a choice of viewing a network policy in any namespace in the cluster directly in YAML or from a form in the web console. 22.4.3.5. Deleting a multi-network policy using the CLI You can delete a multi-network policy in a namespace. Prerequisites Your cluster uses a network plugin that supports NetworkPolicy objects, such as the OVN-Kubernetes network plugin or the OpenShift SDN network plugin with mode: NetworkPolicy set. This mode is the default for OpenShift SDN. You installed the OpenShift CLI ( oc ). You are logged in to the cluster with a user with cluster-admin privileges. You are working in the namespace where the multi-network policy exists. Procedure To delete a multi-network policy object, enter the following command: USD oc delete multi-networkpolicy <policy_name> -n <namespace> where: <policy_name> Specifies the name of the multi-network policy. <namespace> Optional: Specifies the namespace if the object is defined in a different namespace than the current namespace. 
Example output multinetworkpolicy.k8s.cni.cncf.io/default-deny deleted Note If you log in to the web console with cluster-admin privileges, you have a choice of deleting a network policy in any namespace in the cluster directly in YAML or from the policy in the web console through the Actions menu. 22.4.3.6. Creating a default deny all multi-network policy This is a fundamental policy, blocking all cross-pod networking other than network traffic allowed by the configuration of other deployed network policies. This procedure enforces a default deny-by-default policy. Note If you log in with a user with the cluster-admin role, then you can create a network policy in any namespace in the cluster. Prerequisites Your cluster uses a network plugin that supports NetworkPolicy objects, such as the OVN-Kubernetes network plugin or the OpenShift SDN network plugin with mode: NetworkPolicy set. This mode is the default for OpenShift SDN. You installed the OpenShift CLI ( oc ). You are logged in to the cluster with a user with cluster-admin privileges. You are working in the namespace that the multi-network policy applies to. Procedure Create the following YAML that defines a deny-by-default policy to deny ingress from all pods in all namespaces. Save the YAML in the deny-by-default.yaml file: apiVersion: k8s.cni.cncf.io/v1beta1 kind: MultiNetworkPolicy metadata: name: deny-by-default namespace: default 1 annotations: k8s.v1.cni.cncf.io/policy-for: <namespace_name>/<network_name> 2 spec: podSelector: {} 3 policyTypes: 4 - Ingress 5 ingress: [] 6 1 namespace: default deploys this policy to the default namespace. 2 network_name : specifies the name of a network attachment definition. 3 podSelector: is empty, this means it matches all the pods. Therefore, the policy applies to all pods in the default namespace. 4 policyTypes: a list of rule types that the NetworkPolicy relates to. 5 Specifies as Ingress only policyType . 6 There are no ingress rules specified. This causes incoming traffic to be dropped to all pods. Apply the policy by entering the following command: USD oc apply -f deny-by-default.yaml Example output multinetworkpolicy.k8s.cni.cncf.io/deny-by-default created 22.4.3.7. Creating a multi-network policy to allow traffic from external clients With the deny-by-default policy in place you can proceed to configure a policy that allows traffic from external clients to a pod with the label app=web . Note If you log in with a user with the cluster-admin role, then you can create a network policy in any namespace in the cluster. Follow this procedure to configure a policy that allows external service from the public Internet directly or by using a Load Balancer to access the pod. Traffic is only allowed to a pod with the label app=web . Prerequisites Your cluster uses a network plugin that supports NetworkPolicy objects, such as the OVN-Kubernetes network plugin or the OpenShift SDN network plugin with mode: NetworkPolicy set. This mode is the default for OpenShift SDN. You installed the OpenShift CLI ( oc ). You are logged in to the cluster with a user with cluster-admin privileges. You are working in the namespace that the multi-network policy applies to. Procedure Create a policy that allows traffic from the public Internet directly or by using a load balancer to access the pod. 
Save the YAML in the web-allow-external.yaml file: apiVersion: k8s.cni.cncf.io/v1beta1 kind: MultiNetworkPolicy metadata: name: web-allow-external namespace: default annotations: k8s.v1.cni.cncf.io/policy-for: <network_name> spec: policyTypes: - Ingress podSelector: matchLabels: app: web ingress: - {} Apply the policy by entering the following command: USD oc apply -f web-allow-external.yaml Example output multinetworkpolicy.k8s.cni.cncf.io/web-allow-external created This policy allows traffic from all resources, including external traffic as illustrated in the following diagram: 22.4.3.8. Creating a multi-network policy allowing traffic to an application from all namespaces Note If you log in with a user with the cluster-admin role, then you can create a network policy in any namespace in the cluster. Follow this procedure to configure a policy that allows traffic from all pods in all namespaces to a particular application. Prerequisites Your cluster uses a network plugin that supports NetworkPolicy objects, such as the OVN-Kubernetes network plugin or the OpenShift SDN network plugin with mode: NetworkPolicy set. This mode is the default for OpenShift SDN. You installed the OpenShift CLI ( oc ). You are logged in to the cluster with a user with cluster-admin privileges. You are working in the namespace that the multi-network policy applies to. Procedure Create a policy that allows traffic from all pods in all namespaces to a particular application. Save the YAML in the web-allow-all-namespaces.yaml file: apiVersion: k8s.cni.cncf.io/v1beta1 kind: MultiNetworkPolicy metadata: name: web-allow-all-namespaces namespace: default annotations: k8s.v1.cni.cncf.io/policy-for: <network_name> spec: podSelector: matchLabels: app: web 1 policyTypes: - Ingress ingress: - from: - namespaceSelector: {} 2 1 Applies the policy only to app:web pods in default namespace. 2 Selects all pods in all namespaces. Note By default, if you omit specifying a namespaceSelector it does not select any namespaces, which means the policy allows traffic only from the namespace the network policy is deployed to. Apply the policy by entering the following command: USD oc apply -f web-allow-all-namespaces.yaml Example output multinetworkpolicy.k8s.cni.cncf.io/web-allow-all-namespaces created Verification Start a web service in the default namespace by entering the following command: USD oc run web --namespace=default --image=nginx --labels="app=web" --expose --port=80 Run the following command to deploy an alpine image in the secondary namespace and to start a shell: USD oc run test-USDRANDOM --namespace=secondary --rm -i -t --image=alpine -- sh Run the following command in the shell and observe that the request is allowed: # wget -qO- --timeout=2 http://web.default Expected output <!DOCTYPE html> <html> <head> <title>Welcome to nginx!</title> <style> html { color-scheme: light dark; } body { width: 35em; margin: 0 auto; font-family: Tahoma, Verdana, Arial, sans-serif; } </style> </head> <body> <h1>Welcome to nginx!</h1> <p>If you see this page, the nginx web server is successfully installed and working. Further configuration is required.</p> <p>For online documentation and support please refer to <a href="http://nginx.org/">nginx.org</a>.<br/> Commercial support is available at <a href="http://nginx.com/">nginx.com</a>.</p> <p><em>Thank you for using nginx.</em></p> </body> </html> 22.4.3.9. 
Creating a multi-network policy allowing traffic to an application from a namespace Note If you log in with a user with the cluster-admin role, then you can create a network policy in any namespace in the cluster. Follow this procedure to configure a policy that allows traffic to a pod with the label app=web from a particular namespace. You might want to do this to: Restrict traffic to a production database only to namespaces where production workloads are deployed. Enable monitoring tools deployed to a particular namespace to scrape metrics from the current namespace. Prerequisites Your cluster uses a network plugin that supports NetworkPolicy objects, such as the OVN-Kubernetes network plugin or the OpenShift SDN network plugin with mode: NetworkPolicy set. This mode is the default for OpenShift SDN. You installed the OpenShift CLI ( oc ). You are logged in to the cluster with a user with cluster-admin privileges. You are working in the namespace that the multi-network policy applies to. Procedure Create a policy that allows traffic from all pods in a particular namespaces with a label purpose=production . Save the YAML in the web-allow-prod.yaml file: apiVersion: k8s.cni.cncf.io/v1beta1 kind: MultiNetworkPolicy metadata: name: web-allow-prod namespace: default annotations: k8s.v1.cni.cncf.io/policy-for: <network_name> spec: podSelector: matchLabels: app: web 1 policyTypes: - Ingress ingress: - from: - namespaceSelector: matchLabels: purpose: production 2 1 Applies the policy only to app:web pods in the default namespace. 2 Restricts traffic to only pods in namespaces that have the label purpose=production . Apply the policy by entering the following command: USD oc apply -f web-allow-prod.yaml Example output multinetworkpolicy.k8s.cni.cncf.io/web-allow-prod created Verification Start a web service in the default namespace by entering the following command: USD oc run web --namespace=default --image=nginx --labels="app=web" --expose --port=80 Run the following command to create the prod namespace: USD oc create namespace prod Run the following command to label the prod namespace: USD oc label namespace/prod purpose=production Run the following command to create the dev namespace: USD oc create namespace dev Run the following command to label the dev namespace: USD oc label namespace/dev purpose=testing Run the following command to deploy an alpine image in the dev namespace and to start a shell: USD oc run test-USDRANDOM --namespace=dev --rm -i -t --image=alpine -- sh Run the following command in the shell and observe that the request is blocked: # wget -qO- --timeout=2 http://web.default Expected output wget: download timed out Run the following command to deploy an alpine image in the prod namespace and start a shell: USD oc run test-USDRANDOM --namespace=prod --rm -i -t --image=alpine -- sh Run the following command in the shell and observe that the request is allowed: # wget -qO- --timeout=2 http://web.default Expected output <!DOCTYPE html> <html> <head> <title>Welcome to nginx!</title> <style> html { color-scheme: light dark; } body { width: 35em; margin: 0 auto; font-family: Tahoma, Verdana, Arial, sans-serif; } </style> </head> <body> <h1>Welcome to nginx!</h1> <p>If you see this page, the nginx web server is successfully installed and working. 
Further configuration is required.</p> <p>For online documentation and support please refer to <a href="http://nginx.org/">nginx.org</a>.<br/> Commercial support is available at <a href="http://nginx.com/">nginx.com</a>.</p> <p><em>Thank you for using nginx.</em></p> </body> </html> 22.4.4. Additional resources About network policy Understanding multiple networks Configuring a macvlan network Configuring an SR-IOV network device 22.5. Attaching a pod to an additional network As a cluster user you can attach a pod to an additional network. 22.5.1. Adding a pod to an additional network You can add a pod to an additional network. The pod continues to send normal cluster-related network traffic over the default network. When a pod is created additional networks are attached to it. However, if a pod already exists, you cannot attach additional networks to it. The pod must be in the same namespace as the additional network. Prerequisites Install the OpenShift CLI ( oc ). Log in to the cluster. Procedure Add an annotation to the Pod object. Only one of the following annotation formats can be used: To attach an additional network without any customization, add an annotation with the following format. Replace <network> with the name of the additional network to associate with the pod: metadata: annotations: k8s.v1.cni.cncf.io/networks: <network>[,<network>,...] 1 1 To specify more than one additional network, separate each network with a comma. Do not include whitespace between the comma. If you specify the same additional network multiple times, that pod will have multiple network interfaces attached to that network. To attach an additional network with customizations, add an annotation with the following format: metadata: annotations: k8s.v1.cni.cncf.io/networks: |- [ { "name": "<network>", 1 "namespace": "<namespace>", 2 "default-route": ["<default-route>"] 3 } ] 1 Specify the name of the additional network defined by a NetworkAttachmentDefinition object. 2 Specify the namespace where the NetworkAttachmentDefinition object is defined. 3 Optional: Specify an override for the default route, such as 192.168.17.1 . To create the pod, enter the following command. Replace <name> with the name of the pod. USD oc create -f <name>.yaml Optional: To Confirm that the annotation exists in the Pod CR, enter the following command, replacing <name> with the name of the pod. USD oc get pod <name> -o yaml In the following example, the example-pod pod is attached to the net1 additional network: USD oc get pod example-pod -o yaml apiVersion: v1 kind: Pod metadata: annotations: k8s.v1.cni.cncf.io/networks: macvlan-bridge k8s.v1.cni.cncf.io/network-status: |- 1 [{ "name": "openshift-sdn", "interface": "eth0", "ips": [ "10.128.2.14" ], "default": true, "dns": {} },{ "name": "macvlan-bridge", "interface": "net1", "ips": [ "20.2.2.100" ], "mac": "22:2f:60:a5:f8:00", "dns": {} }] name: example-pod namespace: default spec: ... status: ... 1 The k8s.v1.cni.cncf.io/network-status parameter is a JSON array of objects. Each object describes the status of an additional network attached to the pod. The annotation value is stored as a plain text value. 22.5.1.1. Specifying pod-specific addressing and routing options When attaching a pod to an additional network, you may want to specify further properties about that network in a particular pod. This allows you to change some aspects of routing, as well as specify static IP addresses and MAC addresses. To accomplish this, you can use the JSON formatted annotations. 
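As a point of reference before the addressing and routing options that follow, this is a minimal sketch of a complete Pod manifest using the simple annotation form described above. It assumes a NetworkAttachmentDefinition named macvlan-bridge, as in the earlier example output, already exists in the pod's namespace; substitute your own network name.

apiVersion: v1
kind: Pod
metadata:
  name: example-pod
  annotations:
    # Simple form: one or more network names, comma-separated, with no whitespace around the commas
    k8s.v1.cni.cncf.io/networks: macvlan-bridge
spec:
  containers:
  - name: example-pod
    command: ["/bin/bash", "-c", "sleep 2000000000000"]
    image: centos/tools

After this manifest is created with oc create -f, the pod comes up with its default cluster interface plus one additional interface (typically net1) on the macvlan-bridge network, as reflected in the k8s.v1.cni.cncf.io/network-status annotation shown earlier.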
Prerequisites The pod must be in the same namespace as the additional network. Install the OpenShift CLI ( oc ). You must log in to the cluster. Procedure To add a pod to an additional network while specifying addressing and/or routing options, complete the following steps: Edit the Pod resource definition. If you are editing an existing Pod resource, run the following command to edit its definition in the default editor. Replace <name> with the name of the Pod resource to edit. USD oc edit pod <name> In the Pod resource definition, add the k8s.v1.cni.cncf.io/networks parameter to the pod metadata mapping. The k8s.v1.cni.cncf.io/networks accepts a JSON string of a list of objects that reference the name of NetworkAttachmentDefinition custom resource (CR) names in addition to specifying additional properties. metadata: annotations: k8s.v1.cni.cncf.io/networks: '[<network>[,<network>,...]]' 1 1 Replace <network> with a JSON object as shown in the following examples. The single quotes are required. In the following example the annotation specifies which network attachment will have the default route, using the default-route parameter. apiVersion: v1 kind: Pod metadata: name: example-pod annotations: k8s.v1.cni.cncf.io/networks: '[ { "name": "net1" }, { "name": "net2", 1 "default-route": ["192.0.2.1"] 2 }]' spec: containers: - name: example-pod command: ["/bin/bash", "-c", "sleep 2000000000000"] image: centos/tools 1 The name key is the name of the additional network to associate with the pod. 2 The default-route key specifies a value of a gateway for traffic to be routed over if no other routing entry is present in the routing table. If more than one default-route key is specified, this will cause the pod to fail to become active. The default route will cause any traffic that is not specified in other routes to be routed to the gateway. Important Setting the default route to an interface other than the default network interface for OpenShift Container Platform may cause traffic that is anticipated for pod-to-pod traffic to be routed over another interface. To verify the routing properties of a pod, the oc command may be used to execute the ip command within a pod. USD oc exec -it <pod_name> -- ip route Note You may also reference the pod's k8s.v1.cni.cncf.io/network-status to see which additional network has been assigned the default route, by the presence of the default-route key in the JSON-formatted list of objects. To set a static IP address or MAC address for a pod you can use the JSON formatted annotations. This requires you create networks that specifically allow for this functionality. This can be specified in a rawCNIConfig for the CNO. Edit the CNO CR by running the following command: USD oc edit networks.operator.openshift.io cluster The following YAML describes the configuration parameters for the CNO: Cluster Network Operator YAML configuration name: <name> 1 namespace: <namespace> 2 rawCNIConfig: '{ 3 ... }' type: Raw 1 Specify a name for the additional network attachment that you are creating. The name must be unique within the specified namespace . 2 Specify the namespace to create the network attachment in. If you do not specify a value, then the default namespace is used. 3 Specify the CNI plugin configuration in JSON format, which is based on the following template. 
The following object describes the configuration parameters for utilizing static MAC address and IP address using the macvlan CNI plugin: macvlan CNI plugin JSON configuration object using static IP and MAC address { "cniVersion": "0.3.1", "name": "<name>", 1 "plugins": [{ 2 "type": "macvlan", "capabilities": { "ips": true }, 3 "master": "eth0", 4 "mode": "bridge", "ipam": { "type": "static" } }, { "capabilities": { "mac": true }, 5 "type": "tuning" }] } 1 Specifies the name for the additional network attachment to create. The name must be unique within the specified namespace . 2 Specifies an array of CNI plugin configurations. The first object specifies a macvlan plugin configuration and the second object specifies a tuning plugin configuration. 3 Specifies that a request is made to enable the static IP address functionality of the CNI plugin runtime configuration capabilities. 4 Specifies the interface that the macvlan plugin uses. 5 Specifies that a request is made to enable the static MAC address functionality of a CNI plugin. The above network attachment can be referenced in a JSON formatted annotation, along with keys to specify which static IP and MAC address will be assigned to a given pod. Edit the pod with: USD oc edit pod <name> macvlan CNI plugin JSON configuration object using static IP and MAC address apiVersion: v1 kind: Pod metadata: name: example-pod annotations: k8s.v1.cni.cncf.io/networks: '[ { "name": "<name>", 1 "ips": [ "192.0.2.205/24" ], 2 "mac": "CA:FE:C0:FF:EE:00" 3 } ]' 1 Use the <name> as provided when creating the rawCNIConfig above. 2 Provide an IP address including the subnet mask. 3 Provide the MAC address. Note Static IP addresses and MAC addresses do not have to be used at the same time, you may use them individually, or together. To verify the IP address and MAC properties of a pod with additional networks, use the oc command to execute the ip command within a pod. USD oc exec -it <pod_name> -- ip a 22.6. Removing a pod from an additional network As a cluster user you can remove a pod from an additional network. 22.6.1. Removing a pod from an additional network You can remove a pod from an additional network only by deleting the pod. Prerequisites An additional network is attached to the pod. Install the OpenShift CLI ( oc ). Log in to the cluster. Procedure To delete the pod, enter the following command: USD oc delete pod <name> -n <namespace> <name> is the name of the pod. <namespace> is the namespace that contains the pod. 22.7. Editing an additional network As a cluster administrator you can modify the configuration for an existing additional network. 22.7.1. Modifying an additional network attachment definition As a cluster administrator, you can make changes to an existing additional network. Any existing pods attached to the additional network will not be updated. Prerequisites You have configured an additional network for your cluster. Install the OpenShift CLI ( oc ). Log in as a user with cluster-admin privileges. Procedure To edit an additional network for your cluster, complete the following steps: Run the following command to edit the Cluster Network Operator (CNO) CR in your default text editor: USD oc edit networks.operator.openshift.io cluster In the additionalNetworks collection, update the additional network with your changes. Save your changes and quit the text editor to commit your changes. Optional: Confirm that the CNO updated the NetworkAttachmentDefinition object by running the following command. 
Replace <network-name> with the name of the additional network to display. There might be a delay before the CNO updates the NetworkAttachmentDefinition object to reflect your changes. USD oc get network-attachment-definitions <network-name> -o yaml For example, the following console output displays a NetworkAttachmentDefinition object that is named net1 : USD oc get network-attachment-definitions net1 -o go-template='{{printf "%s\n" .spec.config}}' { "cniVersion": "0.3.1", "type": "macvlan", "master": "ens5", "mode": "bridge", "ipam": {"type":"static","routes":[{"dst":"0.0.0.0/0","gw":"10.128.2.1"}],"addresses":[{"address":"10.128.2.100/23","gateway":"10.128.2.1"}],"dns":{"nameservers":["172.30.0.10"],"domain":"us-west-2.compute.internal","search":["us-west-2.compute.internal"]}} } 22.8. Removing an additional network As a cluster administrator you can remove an additional network attachment. 22.8.1. Removing an additional network attachment definition As a cluster administrator, you can remove an additional network from your OpenShift Container Platform cluster. The additional network is not removed from any pods it is attached to. Prerequisites Install the OpenShift CLI ( oc ). Log in as a user with cluster-admin privileges. Procedure To remove an additional network from your cluster, complete the following steps: Edit the Cluster Network Operator (CNO) in your default text editor by running the following command: USD oc edit networks.operator.openshift.io cluster Modify the CR by removing the configuration from the additionalNetworks collection for the network attachment definition you are removing. apiVersion: operator.openshift.io/v1 kind: Network metadata: name: cluster spec: additionalNetworks: [] 1 1 If you are removing the configuration mapping for the only additional network attachment definition in the additionalNetworks collection, you must specify an empty collection. Save your changes and quit the text editor to commit your changes. Optional: Confirm that the additional network CR was deleted by running the following command: USD oc get network-attachment-definition --all-namespaces 22.9. Assigning a secondary network to a VRF As a cluster administrator, you can configure an additional network for a virtual routing and forwarding (VRF) domain by using the CNI VRF plugin. The virtual network that this plugin creates is associated with the physical interface that you specify. Using a secondary network with a VRF instance has the following advantages: Workload isolation Isolate workload traffic by configuring a VRF instance for the additional network. Improved security Enable improved security through isolated network paths in the VRF domain. Multi-tenancy support Support multi-tenancy through network segmentation with a unique routing table in the VRF domain for each tenant. Note Applications that use VRFs must bind to a specific device. The common usage is to use the SO_BINDTODEVICE option for a socket. The SO_BINDTODEVICE option binds the socket to the device that is specified in the passed interface name, for example, eth1 . To use the SO_BINDTODEVICE option, the application must have CAP_NET_RAW capabilities. Using a VRF through the ip vrf exec command is not supported in OpenShift Container Platform pods. To use VRF, bind applications directly to the VRF interface. Additional resources About virtual routing and forwarding 22.9.1. Creating an additional network attachment with the CNI VRF plugin The Cluster Network Operator (CNO) manages additional network definitions. 
When you specify an additional network to create, the CNO creates the NetworkAttachmentDefinition custom resource (CR) automatically. Note Do not edit the NetworkAttachmentDefinition CRs that the Cluster Network Operator manages. Doing so might disrupt network traffic on your additional network. To create an additional network attachment with the CNI VRF plugin, perform the following procedure. Prerequisites Install the OpenShift Container Platform CLI (oc). Log in to the OpenShift cluster as a user with cluster-admin privileges. Procedure Create the Network custom resource (CR) for the additional network attachment and insert the rawCNIConfig configuration for the additional network, as in the following example CR. Save the YAML as the file additional-network-attachment.yaml . apiVersion: operator.openshift.io/v1 kind: Network metadata: name: cluster spec: additionalNetworks: - name: test-network-1 namespace: additional-network-1 type: Raw rawCNIConfig: '{ "cniVersion": "0.3.1", "name": "macvlan-vrf", "plugins": [ 1 { "type": "macvlan", "master": "eth1", "ipam": { "type": "static", "addresses": [ { "address": "191.168.1.23/24" } ] } }, { "type": "vrf", 2 "vrfname": "vrf-1", 3 "table": 1001 4 }] }' 1 plugins must be a list. The first item in the list must be the secondary network underpinning the VRF network. The second item in the list is the VRF plugin configuration. 2 type must be set to vrf . 3 vrfname is the name of the VRF that the interface is assigned to. If it does not exist in the pod, it is created. 4 Optional. table is the routing table ID. By default, the tableid parameter is used. If it is not specified, the CNI assigns a free routing table ID to the VRF. Note VRF functions correctly only when the resource is of type netdevice . Create the Network resource: USD oc create -f additional-network-attachment.yaml Confirm that the CNO created the NetworkAttachmentDefinition CR by running the following command. Replace <namespace> with the namespace that you specified when configuring the network attachment, for example, additional-network-1 . USD oc get network-attachment-definitions -n <namespace> Example output NAME AGE additional-network-1 14m Note There might be a delay before the CNO creates the CR. Verification Create a pod and assign it to the additional network with the VRF instance: Create a YAML file that defines the Pod resource: Example pod-additional-net.yaml file apiVersion: v1 kind: Pod metadata: name: pod-additional-net annotations: k8s.v1.cni.cncf.io/networks: '[ { "name": "test-network-1" 1 } ]' spec: containers: - name: example-pod-1 command: ["/bin/bash", "-c", "sleep 9000000"] image: centos:8 1 Specify the name of the additional network with the VRF instance. Create the Pod resource by running the following command: USD oc create -f pod-additional-net.yaml Example output pod/test-pod created Verify that the pod network attachment is connected to the VRF additional network. Start a remote session with the pod and run the following command: USD ip vrf show Example output Name Table ----------------------- vrf-1 1001 Confirm that the VRF interface is the controller for the additional interface: USD ip link Example output 5: net1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master red state UP mode
[ "openstack subnet set --dns-nameserver 0.0.0.0 <subnet_id>", "apiVersion: operator.openshift.io/v1 kind: Network metadata: name: cluster spec: # additionalNetworks: 1 - name: <name> 2 namespace: <namespace> 3 rawCNIConfig: |- 4 { } type: Raw", "apiVersion: k8s.cni.cncf.io/v1 kind: NetworkAttachmentDefinition metadata: name: <name> 1 spec: config: |- 2 { }", "bridge vlan add vid VLAN_ID dev DEV", "{ \"cniVersion\": \"0.3.1\", \"name\": \"bridge-net\", \"type\": \"bridge\", \"isGateway\": true, \"vlan\": 2, \"ipam\": { \"type\": \"dhcp\" } }", "{ \"cniVersion\": \"0.3.1\", \"name\": \"hostdev-net\", \"type\": \"host-device\", \"device\": \"eth1\" }", "{ \"name\": \"vlan-net\", \"cniVersion\": \"0.3.1\", \"type\": \"vlan\", \"master\": \"eth0\", \"mtu\": 1500, \"vlanId\": 5, \"linkInContainer\": false, \"ipam\": { \"type\": \"host-local\", \"subnet\": \"10.1.1.0/24\" }, \"dns\": { \"nameservers\": [ \"10.1.1.1\", \"8.8.8.8\" ] } }", "{ \"cniVersion\": \"0.3.1\", \"name\": \"ipvlan-net\", \"type\": \"ipvlan\", \"master\": \"eth1\", \"linkInContainer\": false, \"mode\": \"l3\", \"ipam\": { \"type\": \"static\", \"addresses\": [ { \"address\": \"192.168.10.10/24\" } ] } }", "{ \"cniVersion\": \"0.3.1\", \"name\": \"macvlan-net\", \"type\": \"macvlan\", \"master\": \"eth1\", \"linkInContainer\": false, \"mode\": \"bridge\", \"ipam\": { \"type\": \"dhcp\" } }", "{ \"name\": \"mynet\", \"cniVersion\": \"0.3.1\", \"type\": \"tap\", \"mac\": \"00:11:22:33:44:55\", \"mtu\": 1500, \"selinuxcontext\": \"system_u:system_r:container_t:s0\", \"multiQueue\": true, \"owner\": 0, \"group\": 0 \"bridge\": \"br1\" }", "apiVersion: machineconfiguration.openshift.io/v1 kind: MachineConfig metadata: labels: machineconfiguration.openshift.io/role: worker name: 99-worker-setsebool spec: config: ignition: version: 3.2.0 systemd: units: - enabled: true name: setsebool.service contents: | [Unit] Description=Set SELinux boolean for the TAP CNI plugin Before=kubelet.service [Service] Type=oneshot ExecStart=/usr/sbin/setsebool container_use_devices=on RemainAfterExit=true [Install] WantedBy=multi-user.target graphical.target", "oc apply -f setsebool-container-use-devices.yaml", "oc get machineconfigpools", "NAME CONFIG UPDATED UPDATING DEGRADED MACHINECOUNT READYMACHINECOUNT UPDATEDMACHINECOUNT DEGRADEDMACHINECOUNT AGE master rendered-master-e5e0c8e8be9194e7c5a882e047379cfa True False False 3 3 3 0 7d2h worker rendered-worker-d6c9ca107fba6cd76cdcbfcedcafa0f2 True False False 3 3 3 0 7d", "apiVersion: k8s.cni.cncf.io/v1beta1 kind: MultiNetworkPolicy metadata: name: allow-same-namespace annotations: k8s.v1.cni.cncf.io/policy-for: blue2 spec: podSelector: ingress: - from: - podSelector: {}", "apiVersion: k8s.cni.cncf.io/v1beta1 kind: MultiNetworkPolicy metadata: name: ingress-ipblock annotations: k8s.v1.cni.cncf.io/policy-for: default/flatl2net spec: podSelector: matchLabels: name: access-control policyTypes: - Ingress ingress: - from: - ipBlock: cidr: 10.200.0.0/30", "{ \"cniVersion\": \"0.3.1\", \"name\": \"l2-network\", \"type\": \"ovn-k8s-cni-overlay\", \"topology\":\"layer2\", \"subnets\": \"10.100.200.0/24\", \"mtu\": 1300, \"netAttachDefName\": \"ns1/l2-network\", \"excludeSubnets\": \"10.100.200.0/29\" }", "apiVersion: nmstate.io/v1 kind: NodeNetworkConfigurationPolicy metadata: name: mapping 1 spec: nodeSelector: node-role.kubernetes.io/worker: '' 2 desiredState: ovn: bridge-mappings: - localnet: localnet1 3 bridge: br-ex 4 state: present 5", "apiVersion: nmstate.io/v1 kind: NodeNetworkConfigurationPolicy 
metadata: name: ovs-br1-multiple-networks 1 spec: nodeSelector: node-role.kubernetes.io/worker: '' 2 desiredState: interfaces: - name: ovs-br1 3 description: |- A dedicated OVS bridge with eth1 as a port allowing all VLANs and untagged traffic type: ovs-bridge state: up bridge: allow-extra-patch-ports: true options: stp: false port: - name: eth1 4 ovn: bridge-mappings: - localnet: localnet2 5 bridge: ovs-br1 6 state: present 7", "{ \"cniVersion\": \"0.3.1\", \"name\": \"ns1-localnet-network\", \"type\": \"ovn-k8s-cni-overlay\", \"topology\":\"localnet\", \"subnets\": \"202.10.130.112/28\", \"vlanID\": 33, \"mtu\": 1500, \"netAttachDefName\": \"ns1/localnet-network\" \"excludeSubnets\": \"10.100.200.0/29\" }", "apiVersion: v1 kind: Pod metadata: annotations: k8s.v1.cni.cncf.io/networks: l2-network name: tinypod namespace: ns1 spec: containers: - args: - pause image: k8s.gcr.io/e2e-test-images/agnhost:2.36 imagePullPolicy: IfNotPresent name: agnhost-container", "apiVersion: v1 kind: Pod metadata: annotations: k8s.v1.cni.cncf.io/networks: '[ { \"name\": \"l2-network\", 1 \"mac\": \"02:03:04:05:06:07\", 2 \"interface\": \"myiface1\", 3 \"ips\": [ \"192.0.2.20/24\" ] 4 } ]' name: tinypod namespace: ns1 spec: containers: - args: - pause image: k8s.gcr.io/e2e-test-images/agnhost:2.36 imagePullPolicy: IfNotPresent name: agnhost-container", "{ \"ipam\": { \"type\": \"static\", \"addresses\": [ { \"address\": \"191.168.1.7/24\" } ] } }", "apiVersion: operator.openshift.io/v1 kind: Network metadata: name: cluster spec: additionalNetworks: - name: dhcp-shim namespace: default type: Raw rawCNIConfig: |- { \"name\": \"dhcp-shim\", \"cniVersion\": \"0.3.1\", \"type\": \"bridge\", \"ipam\": { \"type\": \"dhcp\" } } #", "{ \"ipam\": { \"type\": \"dhcp\" } }", "{ \"ipam\": { \"type\": \"whereabouts\", \"range\": \"192.0.2.192/27\", \"exclude\": [ \"192.0.2.192/30\", \"192.0.2.196/32\" ] } }", "oc edit network.operator.openshift.io cluster", "apiVersion: operator.openshift.io/v1 kind: Network metadata: name: cluster spec: additionalNetworks: - name: whereabouts-shim namespace: default rawCNIConfig: |- { \"name\": \"whereabouts-shim\", \"cniVersion\": \"0.3.1\", \"type\": \"bridge\", \"ipam\": { \"type\": \"whereabouts\" } } type: Raw", "oc get all -n openshift-multus | grep whereabouts-reconciler", "pod/whereabouts-reconciler-jnp6g 1/1 Running 0 6s pod/whereabouts-reconciler-k76gg 1/1 Running 0 6s pod/whereabouts-reconciler-k86t9 1/1 Running 0 6s pod/whereabouts-reconciler-p4sxw 1/1 Running 0 6s pod/whereabouts-reconciler-rvfdv 1/1 Running 0 6s pod/whereabouts-reconciler-svzw9 1/1 Running 0 6s daemonset.apps/whereabouts-reconciler 6 6 6 6 6 kubernetes.io/os=linux 6s", "oc create configmap whereabouts-config -n openshift-multus --from-literal=reconciler_cron_expression=\"*/15 * * * *\"", "oc get all -n openshift-multus | grep whereabouts-reconciler", "pod/whereabouts-reconciler-2p7hw 1/1 Running 0 4m14s pod/whereabouts-reconciler-76jk7 1/1 Running 0 4m14s pod/whereabouts-reconciler-94zw6 1/1 Running 0 4m14s pod/whereabouts-reconciler-mfh68 1/1 Running 0 4m14s pod/whereabouts-reconciler-pgshz 1/1 Running 0 4m14s pod/whereabouts-reconciler-xn5xz 1/1 Running 0 4m14s daemonset.apps/whereabouts-reconciler 6 6 6 6 6 kubernetes.io/os=linux 4m16s", "oc -n openshift-multus logs whereabouts-reconciler-2p7hw", "2024-02-02T16:33:54Z [debug] event not relevant: \"/cron-schedule/..2024_02_02_16_33_54.1375928161\": CREATE 2024-02-02T16:33:54Z [debug] event not relevant: \"/cron-schedule/..2024_02_02_16_33_54.1375928161\": 
CHMOD 2024-02-02T16:33:54Z [debug] event not relevant: \"/cron-schedule/..data_tmp\": RENAME 2024-02-02T16:33:54Z [verbose] using expression: */15 * * * * 2024-02-02T16:33:54Z [verbose] configuration updated to file \"/cron-schedule/..data\". New cron expression: */15 * * * * 2024-02-02T16:33:54Z [verbose] successfully updated CRON configuration id \"00c2d1c9-631d-403f-bb86-73ad104a6817\" - new cron expression: */15 * * * * 2024-02-02T16:33:54Z [debug] event not relevant: \"/cron-schedule/config\": CREATE 2024-02-02T16:33:54Z [debug] event not relevant: \"/cron-schedule/..2024_02_02_16_26_17.3874177937\": REMOVE 2024-02-02T16:45:00Z [verbose] starting reconciler run 2024-02-02T16:45:00Z [debug] NewReconcileLooper - inferred connection data 2024-02-02T16:45:00Z [debug] listing IP pools 2024-02-02T16:45:00Z [debug] no IP addresses to cleanup 2024-02-02T16:45:00Z [verbose] reconciler success", "cniVersion: operator.openshift.io/v1 kind: Network =metadata: name: cluster spec: additionalNetworks: - name: whereabouts-shim namespace: default type: Raw rawCNIConfig: |- { \"name\": \"whereabouts-dual-stack\", \"cniVersion\": \"0.3.1, \"type\": \"bridge\", \"ipam\": { \"type\": \"whereabouts\", \"ipRanges\": [ {\"range\": \"192.168.10.0/24\"}, {\"range\": \"2001:db8::/64\"} ] } }", "oc exec -it mypod -- ip a", "oc create namespace <namespace_name>", "oc edit networks.operator.openshift.io cluster", "apiVersion: operator.openshift.io/v1 kind: Network metadata: name: cluster spec: # additionalNetworks: - name: tertiary-net namespace: namespace2 type: Raw rawCNIConfig: |- { \"cniVersion\": \"0.3.1\", \"name\": \"tertiary-net\", \"type\": \"ipvlan\", \"master\": \"eth1\", \"mode\": \"l2\", \"ipam\": { \"type\": \"static\", \"addresses\": [ { \"address\": \"192.168.1.23/24\" } ] } }", "oc get network-attachment-definitions -n <namespace>", "NAME AGE test-network-1 14m", "apiVersion: k8s.cni.cncf.io/v1 kind: NetworkAttachmentDefinition metadata: name: next-net spec: config: |- { \"cniVersion\": \"0.3.1\", \"name\": \"work-network\", \"type\": \"host-device\", \"device\": \"eth1\", \"ipam\": { \"type\": \"dhcp\" } }", "oc apply -f <file>.yaml", "oc new-project test-namespace", "apiVersion: sriovnetwork.openshift.io/v1 kind: SriovNetworkNodePolicy metadata: name: sriovnic namespace: openshift-sriov-network-operator spec: deviceType: netdevice isRdma: false needVhostNet: true nicSelector: vendor: \"15b3\" 1 deviceID: \"101b\" 2 rootDevices: [\"00:05.0\"] numVfs: 10 priority: 99 resourceName: sriovnic nodeSelector: feature.node.kubernetes.io/network-sriov.capable: \"true\"", "oc apply -f sriov-node-network-policy.yaml", "apiVersion: sriovnetwork.openshift.io/v1 kind: SriovNetwork metadata: name: sriov-network namespace: openshift-sriov-network-operator spec: networkNamespace: test-namespace resourceName: sriovnic spoofChk: \"off\" trust: \"on\"", "oc apply -f sriov-network-attachment.yaml", "apiVersion: k8s.cni.cncf.io/v1 kind: NetworkAttachmentDefinition metadata: name: vlan-100 namespace: test-namespace spec: config: | { \"cniVersion\": \"0.4.0\", \"name\": \"vlan-100\", \"plugins\": [ { \"type\": \"vlan\", \"master\": \"ext0\", 1 \"mtu\": 1500, \"vlanId\": 100, \"linkInContainer\": true, 2 \"ipam\": {\"type\": \"whereabouts\", \"ipRanges\": [{\"range\": \"1.1.1.0/24\"}]} } ] }", "oc apply -f vlan100-additional-network-configuration.yaml", "apiVersion: v1 kind: Namespace metadata: name: test-namespace labels: pod-security.kubernetes.io/enforce: privileged pod-security.kubernetes.io/audit: privileged 
pod-security.kubernetes.io/warn: privileged security.openshift.io/scc.podSecurityLabelSync: \"false\" --- apiVersion: v1 kind: Pod metadata: name: nginx-pod namespace: test-namespace annotations: k8s.v1.cni.cncf.io/networks: '[ { \"name\": \"sriov-network\", \"namespace\": \"test-namespace\", \"interface\": \"ext0\" 1 }, { \"name\": \"vlan-100\", \"namespace\": \"test-namespace\", \"interface\": \"ext0.100\" } ]' spec: securityContext: runAsNonRoot: true containers: - name: nginx-container image: nginxinc/nginx-unprivileged:latest securityContext: allowPrivilegeEscalation: false capabilities: drop: [\"ALL\"] ports: - containerPort: 80 seccompProfile: type: \"RuntimeDefault\"", "oc apply -f pod-a.yaml", "oc describe pods nginx-pod -n test-namespace", "Name: nginx-pod Namespace: test-namespace Priority: 0 Node: worker-1/10.46.186.105 Start Time: Mon, 14 Aug 2023 16:23:13 -0400 Labels: <none> Annotations: k8s.ovn.org/pod-networks: {\"default\":{\"ip_addresses\":[\"10.131.0.26/23\"],\"mac_address\":\"0a:58:0a:83:00:1a\",\"gateway_ips\":[\"10.131.0.1\"],\"routes\":[{\"dest\":\"10.128.0.0 k8s.v1.cni.cncf.io/network-status: [{ \"name\": \"ovn-kubernetes\", \"interface\": \"eth0\", \"ips\": [ \"10.131.0.26\" ], \"mac\": \"0a:58:0a:83:00:1a\", \"default\": true, \"dns\": {} },{ \"name\": \"test-namespace/sriov-network\", \"interface\": \"ext0\", \"mac\": \"6e:a7:5e:3f:49:1b\", \"dns\": {}, \"device-info\": { \"type\": \"pci\", \"version\": \"1.0.0\", \"pci\": { \"pci-address\": \"0000:d8:00.2\" } } },{ \"name\": \"test-namespace/vlan-100\", \"interface\": \"ext0.100\", \"ips\": [ \"1.1.1.1\" ], \"mac\": \"6e:a7:5e:3f:49:1b\", \"dns\": {} }] k8s.v1.cni.cncf.io/networks: [ { \"name\": \"sriov-network\", \"namespace\": \"test-namespace\", \"interface\": \"ext0\" }, { \"name\": \"vlan-100\", \"namespace\": \"test-namespace\", \"i openshift.io/scc: privileged Status: Running IP: 10.131.0.26 IPs: IP: 10.131.0.26", "oc new-project test-namespace", "apiVersion: \"k8s.cni.cncf.io/v1\" kind: NetworkAttachmentDefinition metadata: name: bridge-network spec: config: '{ \"cniVersion\": \"0.4.0\", \"name\": \"bridge-network\", \"type\": \"bridge\", \"bridge\": \"br-001\", \"isGateway\": true, \"ipMasq\": true, \"hairpinMode\": true, \"ipam\": { \"type\": \"host-local\", \"subnet\": \"10.0.0.0/24\", \"routes\": [{\"dst\": \"0.0.0.0/0\"}] } }'", "oc apply -f bridge-nad.yaml", "oc get network-attachment-definitions", "NAME AGE bridge-network 15s", "apiVersion: k8s.cni.cncf.io/v1 kind: NetworkAttachmentDefinition metadata: name: ipvlan-net namespace: test-namespace spec: config: '{ \"cniVersion\": \"0.3.1\", \"name\": \"ipvlan-net\", \"type\": \"ipvlan\", \"master\": \"ext0\", 1 \"mode\": \"l3\", \"linkInContainer\": true, 2 \"ipam\": {\"type\": \"whereabouts\", \"ipRanges\": [{\"range\": \"10.0.0.0/24\"}]} }'", "oc apply -f ipvlan-additional-network-configuration.yaml", "oc get network-attachment-definitions", "NAME AGE bridge-network 87s ipvlan-net 9s", "apiVersion: v1 kind: Pod metadata: name: pod-a namespace: test-namespace annotations: k8s.v1.cni.cncf.io/networks: '[ { \"name\": \"bridge-network\", \"interface\": \"ext0\" 1 }, { \"name\": \"ipvlan-net\", \"interface\": \"ext1\" } ]' spec: securityContext: runAsNonRoot: true seccompProfile: type: RuntimeDefault containers: - name: test-pod image: quay.io/openshifttest/hello-sdn@sha256:c89445416459e7adea9a5a416b3365ed3d74f2491beb904d61dc8d1eb89a72a4 securityContext: allowPrivilegeEscalation: false capabilities: drop: [ALL]", "oc apply -f pod-a.yaml", "oc get pod 
-n test-namespace", "NAME READY STATUS RESTARTS AGE pod-a 1/1 Running 0 2m36s", "oc exec -n test-namespace pod-a -- ip a", "1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo valid_lft forever preferred_lft forever inet6 ::1/128 scope host valid_lft forever preferred_lft forever 3: eth0@if105: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1400 qdisc noqueue state UP group default link/ether 0a:58:0a:d9:00:5d brd ff:ff:ff:ff:ff:ff link-netnsid 0 inet 10.217.0.93/23 brd 10.217.1.255 scope global eth0 valid_lft forever preferred_lft forever inet6 fe80::488b:91ff:fe84:a94b/64 scope link valid_lft forever preferred_lft forever 4: ext0@if107: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default link/ether be:da:bd:7e:f4:37 brd ff:ff:ff:ff:ff:ff link-netnsid 0 inet 10.0.0.2/24 brd 10.0.0.255 scope global ext0 valid_lft forever preferred_lft forever inet6 fe80::bcda:bdff:fe7e:f437/64 scope link valid_lft forever preferred_lft forever 5: ext1@ext0: <BROADCAST,MULTICAST,NOARP,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN group default link/ether be:da:bd:7e:f4:37 brd ff:ff:ff:ff:ff:ff inet 10.0.0.1/24 brd 10.0.0.255 scope global ext1 valid_lft forever preferred_lft forever inet6 fe80::beda:bd00:17e:f437/64 scope link valid_lft forever preferred_lft forever", "apiVersion: k8s.cni.cncf.io/v1beta1 kind: MultiNetworkPolicy", "apiVersion: k8s.cni.cncf.io/v1beta1 kind: MultiNetworkPolicy metadata: annotations: k8s.v1.cni.cncf.io/policy-for: <network_name>", "apiVersion: operator.openshift.io/v1 kind: Network metadata: name: cluster spec: useMultiNetworkPolicy: true", "oc patch network.operator.openshift.io cluster --type=merge --patch-file=multinetwork-enable-patch.yaml", "network.operator.openshift.io/cluster patched", "touch <policy_name>.yaml", "apiVersion: k8s.cni.cncf.io/v1beta1 kind: MultiNetworkPolicy metadata: name: deny-by-default annotations: k8s.v1.cni.cncf.io/policy-for:<namespace_name>/<network_name> spec: podSelector: {} policyTypes: - Ingress ingress: []", "apiVersion: k8s.cni.cncf.io/v1beta1 kind: MultiNetworkPolicy metadata: name: allow-same-namespace annotations: k8s.v1.cni.cncf.io/policy-for: <network_name> spec: podSelector: ingress: - from: - podSelector: {}", "apiVersion: k8s.cni.cncf.io/v1beta1 kind: MultiNetworkPolicy metadata: name: allow-traffic-pod annotations: k8s.v1.cni.cncf.io/policy-for: <network_name> spec: podSelector: matchLabels: pod: pod-a policyTypes: - Ingress ingress: - from: - namespaceSelector: matchLabels: kubernetes.io/metadata.name: namespace-y", "apiVersion: k8s.cni.cncf.io/v1beta1 kind: MultiNetworkPolicy metadata: name: api-allow annotations: k8s.v1.cni.cncf.io/policy-for: <network_name> spec: podSelector: matchLabels: app: bookstore role: api ingress: - from: - podSelector: matchLabels: app: bookstore", "oc apply -f <policy_name>.yaml -n <namespace>", "multinetworkpolicy.k8s.cni.cncf.io/deny-by-default created", "oc get multi-networkpolicy", "oc apply -n <namespace> -f <policy_file>.yaml", "oc edit multi-networkpolicy <policy_name> -n <namespace>", "oc describe multi-networkpolicy <policy_name> -n <namespace>", "oc get multi-networkpolicy", "oc describe multi-networkpolicy <policy_name> -n <namespace>", "oc delete multi-networkpolicy <policy_name> -n <namespace>", "multinetworkpolicy.k8s.cni.cncf.io/default-deny deleted", "apiVersion: k8s.cni.cncf.io/v1beta1 kind: MultiNetworkPolicy metadata: name: 
deny-by-default namespace: default 1 annotations: k8s.v1.cni.cncf.io/policy-for: <namespace_name>/<network_name> 2 spec: podSelector: {} 3 policyTypes: 4 - Ingress 5 ingress: [] 6", "oc apply -f deny-by-default.yaml", "multinetworkpolicy.k8s.cni.cncf.io/deny-by-default created", "apiVersion: k8s.cni.cncf.io/v1beta1 kind: MultiNetworkPolicy metadata: name: web-allow-external namespace: default annotations: k8s.v1.cni.cncf.io/policy-for: <network_name> spec: policyTypes: - Ingress podSelector: matchLabels: app: web ingress: - {}", "oc apply -f web-allow-external.yaml", "multinetworkpolicy.k8s.cni.cncf.io/web-allow-external created", "apiVersion: k8s.cni.cncf.io/v1beta1 kind: MultiNetworkPolicy metadata: name: web-allow-all-namespaces namespace: default annotations: k8s.v1.cni.cncf.io/policy-for: <network_name> spec: podSelector: matchLabels: app: web 1 policyTypes: - Ingress ingress: - from: - namespaceSelector: {} 2", "oc apply -f web-allow-all-namespaces.yaml", "multinetworkpolicy.k8s.cni.cncf.io/web-allow-all-namespaces created", "oc run web --namespace=default --image=nginx --labels=\"app=web\" --expose --port=80", "oc run test-USDRANDOM --namespace=secondary --rm -i -t --image=alpine -- sh", "wget -qO- --timeout=2 http://web.default", "<!DOCTYPE html> <html> <head> <title>Welcome to nginx!</title> <style> html { color-scheme: light dark; } body { width: 35em; margin: 0 auto; font-family: Tahoma, Verdana, Arial, sans-serif; } </style> </head> <body> <h1>Welcome to nginx!</h1> <p>If you see this page, the nginx web server is successfully installed and working. Further configuration is required.</p> <p>For online documentation and support please refer to <a href=\"http://nginx.org/\">nginx.org</a>.<br/> Commercial support is available at <a href=\"http://nginx.com/\">nginx.com</a>.</p> <p><em>Thank you for using nginx.</em></p> </body> </html>", "apiVersion: k8s.cni.cncf.io/v1beta1 kind: MultiNetworkPolicy metadata: name: web-allow-prod namespace: default annotations: k8s.v1.cni.cncf.io/policy-for: <network_name> spec: podSelector: matchLabels: app: web 1 policyTypes: - Ingress ingress: - from: - namespaceSelector: matchLabels: purpose: production 2", "oc apply -f web-allow-prod.yaml", "multinetworkpolicy.k8s.cni.cncf.io/web-allow-prod created", "oc run web --namespace=default --image=nginx --labels=\"app=web\" --expose --port=80", "oc create namespace prod", "oc label namespace/prod purpose=production", "oc create namespace dev", "oc label namespace/dev purpose=testing", "oc run test-USDRANDOM --namespace=dev --rm -i -t --image=alpine -- sh", "wget -qO- --timeout=2 http://web.default", "wget: download timed out", "oc run test-USDRANDOM --namespace=prod --rm -i -t --image=alpine -- sh", "wget -qO- --timeout=2 http://web.default", "<!DOCTYPE html> <html> <head> <title>Welcome to nginx!</title> <style> html { color-scheme: light dark; } body { width: 35em; margin: 0 auto; font-family: Tahoma, Verdana, Arial, sans-serif; } </style> </head> <body> <h1>Welcome to nginx!</h1> <p>If you see this page, the nginx web server is successfully installed and working. Further configuration is required.</p> <p>For online documentation and support please refer to <a href=\"http://nginx.org/\">nginx.org</a>.<br/> Commercial support is available at <a href=\"http://nginx.com/\">nginx.com</a>.</p> <p><em>Thank you for using nginx.</em></p> </body> </html>", "metadata: annotations: k8s.v1.cni.cncf.io/networks: <network>[,<network>,...] 
1", "metadata: annotations: k8s.v1.cni.cncf.io/networks: |- [ { \"name\": \"<network>\", 1 \"namespace\": \"<namespace>\", 2 \"default-route\": [\"<default-route>\"] 3 } ]", "oc create -f <name>.yaml", "oc get pod <name> -o yaml", "oc get pod example-pod -o yaml apiVersion: v1 kind: Pod metadata: annotations: k8s.v1.cni.cncf.io/networks: macvlan-bridge k8s.v1.cni.cncf.io/network-status: |- 1 [{ \"name\": \"openshift-sdn\", \"interface\": \"eth0\", \"ips\": [ \"10.128.2.14\" ], \"default\": true, \"dns\": {} },{ \"name\": \"macvlan-bridge\", \"interface\": \"net1\", \"ips\": [ \"20.2.2.100\" ], \"mac\": \"22:2f:60:a5:f8:00\", \"dns\": {} }] name: example-pod namespace: default spec: status:", "oc edit pod <name>", "metadata: annotations: k8s.v1.cni.cncf.io/networks: '[<network>[,<network>,...]]' 1", "apiVersion: v1 kind: Pod metadata: name: example-pod annotations: k8s.v1.cni.cncf.io/networks: '[ { \"name\": \"net1\" }, { \"name\": \"net2\", 1 \"default-route\": [\"192.0.2.1\"] 2 }]' spec: containers: - name: example-pod command: [\"/bin/bash\", \"-c\", \"sleep 2000000000000\"] image: centos/tools", "oc exec -it <pod_name> -- ip route", "oc edit networks.operator.openshift.io cluster", "name: <name> 1 namespace: <namespace> 2 rawCNIConfig: '{ 3 }' type: Raw", "{ \"cniVersion\": \"0.3.1\", \"name\": \"<name>\", 1 \"plugins\": [{ 2 \"type\": \"macvlan\", \"capabilities\": { \"ips\": true }, 3 \"master\": \"eth0\", 4 \"mode\": \"bridge\", \"ipam\": { \"type\": \"static\" } }, { \"capabilities\": { \"mac\": true }, 5 \"type\": \"tuning\" }] }", "oc edit pod <name>", "apiVersion: v1 kind: Pod metadata: name: example-pod annotations: k8s.v1.cni.cncf.io/networks: '[ { \"name\": \"<name>\", 1 \"ips\": [ \"192.0.2.205/24\" ], 2 \"mac\": \"CA:FE:C0:FF:EE:00\" 3 } ]'", "oc exec -it <pod_name> -- ip a", "oc delete pod <name> -n <namespace>", "oc edit networks.operator.openshift.io cluster", "oc get network-attachment-definitions <network-name> -o yaml", "oc get network-attachment-definitions net1 -o go-template='{{printf \"%s\\n\" .spec.config}}' { \"cniVersion\": \"0.3.1\", \"type\": \"macvlan\", \"master\": \"ens5\", \"mode\": \"bridge\", \"ipam\": {\"type\":\"static\",\"routes\":[{\"dst\":\"0.0.0.0/0\",\"gw\":\"10.128.2.1\"}],\"addresses\":[{\"address\":\"10.128.2.100/23\",\"gateway\":\"10.128.2.1\"}],\"dns\":{\"nameservers\":[\"172.30.0.10\"],\"domain\":\"us-west-2.compute.internal\",\"search\":[\"us-west-2.compute.internal\"]}} }", "oc edit networks.operator.openshift.io cluster", "apiVersion: operator.openshift.io/v1 kind: Network metadata: name: cluster spec: additionalNetworks: [] 1", "oc get network-attachment-definition --all-namespaces", "apiVersion: operator.openshift.io/v1 kind: Network metadata: name: cluster spec: additionalNetworks: - name: test-network-1 namespace: additional-network-1 type: Raw rawCNIConfig: '{ \"cniVersion\": \"0.3.1\", \"name\": \"macvlan-vrf\", \"plugins\": [ 1 { \"type\": \"macvlan\", \"master\": \"eth1\", \"ipam\": { \"type\": \"static\", \"addresses\": [ { \"address\": \"191.168.1.23/24\" } ] } }, { \"type\": \"vrf\", 2 \"vrfname\": \"vrf-1\", 3 \"table\": 1001 4 }] }'", "oc create -f additional-network-attachment.yaml", "oc get network-attachment-definitions -n <namespace>", "NAME AGE additional-network-1 14m", "apiVersion: v1 kind: Pod metadata: name: pod-additional-net annotations: k8s.v1.cni.cncf.io/networks: '[ { \"name\": \"test-network-1\" 1 } ]' spec: containers: - name: example-pod-1 command: [\"/bin/bash\", \"-c\", \"sleep 9000000\"] image: centos:8", 
"oc create -f pod-additional-net.yaml", "pod/test-pod created", "ip vrf show", "Name Table ----------------------- vrf-1 1001", "ip link", "5: net1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master red state UP mode" ]
https://docs.redhat.com/en/documentation/openshift_container_platform/4.14/html/networking/multiple-networks
Chapter 4. Major Satellite components
Chapter 4. Major Satellite components A typical Satellite deployment consists of the following components: a Satellite Server, Capsule Servers that mirror content from Satellite Server, and hosts that receive content and configuration from Satellite Server and Capsule Servers. 4.1. Satellite Server overview Satellite Server is the central component of a Satellite deployment where you plan and manage the content lifecycle. A typical Satellite deployment includes one Satellite Server on which you perform the following operations: Content lifecycle management Configuration of Capsule Servers Configuration of hosts Host provisioning Patch management Subscription management Satellite Server delegates content distribution, host provisioning, and communication to Capsule Servers. Satellite Server itself also includes a Capsule. Satellite Server also contains a fine-grained authentication system. You can grant Satellite users permissions to access precisely the parts of the infrastructure for which they are responsible. Additional resources For more information about managing permissions, see Managing Users and Roles in Administering Red Hat Satellite . 4.2. Organizations and locations in Red Hat Satellite On your Satellite Server, you can define multiple organizations and locations to help organize content, hosts, and configurations. Organizations Organizations typically represent different business units, departments, or teams, such as Finance , Marketing , or Web Development . By creating organizations, you can create logical containers to isolate and manage their configurations separately according to their specific requirements. Locations Locations typically represent physical locations, such as countries or cities. By creating locations, you can define geographical sites where hosts are located. For example, this is useful in environments with multiple data centers. 4.3. Capsule overview With Capsule Servers, you can extend the reach and scalability of your Satellite deployment. Capsule Servers provide the following functionalities in a Red Hat Satellite deployment: Mirroring content from Satellite Server to establish content sources in various geographical or logical locations. By registering a host to a Capsule Server, you can configure this host to receive content and configuration from the Capsule in their location instead of from the central Satellite Server. Running localized services to discover, provision, control, and configure hosts. By using content views, you can specify the exact subset of content that Capsule Server makes available to hosts. For more information, see Chapter 1, Content and patch management with Red Hat Satellite . 4.4. Overview of hosts in Satellite A host is any Linux client that Red Hat Satellite manages. Hosts can be physical or virtual. You can deploy virtual hosts on any platform supported by Red Hat Satellite, such as Amazon EC2, Google Compute Engine, KVM, libvirt, Microsoft Azure, OpenStack, Red Hat Virtualization, Rackspace Cloud Services, or VMware vSphere. With Satellite, you can manage hosts at scale, including monitoring, provisioning, remote execution, configuration management, software management, and subscription management. 4.5. List of key open source components of Satellite Server Satellite consists of several open source projects integrated with each other, such as the following: Foreman Foreman is a lifecycle management application for physical and virtual systems. 
It helps manage hosts throughout their lifecycle, from provisioning and configuration to orchestration and monitoring. Katello Katello is a plugin of Foreman that extends Foreman capabilities with additional features for content, subscription, and repository management. Katello enables Satellite to subscribe to Red Hat repositories and to download content. Candlepin Candlepin is a service for subscription management. Pulp Pulp is a service for repository and content management. Additional resources See Satellite 6 Component Versions for a complete list of the upstream components integrated into Satellite and for information about which upstream component versions were delivered with different versions of Satellite. 4.6. Capsule features Capsule Servers provide local host management services and can mirror content from Satellite Server. To mirror content from Satellite Server, Capsule provides the following functionalities: Repository synchronization Capsule Servers pull content for selected lifecycle environments from Satellite Server and make this content available to the hosts they manage. Content delivery Hosts configured to use Capsule Server download content from that Capsule rather than from Satellite Server. Host action delivery Capsule Server executes scheduled actions on hosts. Red Hat Subscription Management (RHSM) proxy Hosts are registered to their associated Capsule Servers rather than to the central Satellite Server or the Red Hat Customer Portal. You can use Capsule to run the following services for infrastructure and host management: DHCP Capsule can manage a DHCP server, including integration with an existing solution, such as ISC DHCP servers, Active Directory, and Libvirt instances. DNS Capsule can manage a DNS server, including integration with an existing solution, such as ISC BIND and Active Directory. TFTP Capsule can integrate with any UNIX-based TFTP server. Realm Capsule can manage Kerberos realms or domains so that hosts can join them automatically during provisioning. Capsule can integrate with an existing infrastructure, including Identity Management and Active Directory. Puppet server Capsule can act as a configuration management server by running a Puppet server. Puppet Certificate Authority Capsule can integrate with the Puppet certificate authority (CA) to provide certificates to hosts. Baseboard Management Controller (BMC) Capsule can provide power management for hosts by using the Intelligent Platform Management Interface (IPMI) or Redfish standards. Provisioning template proxy Capsule can serve provisioning templates to hosts. OpenSCAP Capsule can perform security compliance scans on hosts. Remote Execution (REX) Capsule can run remote job execution on hosts. You can configure a Capsule Server for a specific limited purpose by enabling only selected features on that Capsule. Common configurations include the following: Infrastructure Capsules: DNS + DHCP + TFTP Capsules with these services provide infrastructure services for hosts and have all necessary services for provisioning new hosts. Content Capsules: Pulp Capsules with this service provide content synchronized from Satellite Server to hosts. Configuration Capsules: Pulp + Puppet + PuppetCA Capsules with these services provide content and run configuration services for hosts. Capsules with DNS + DHCP + TFTP + Pulp + Puppet + PuppetCA Capsules with these services provide a full set of Capsule features. 
By configuring a Capsule with all these features, you can isolate hosts assigned to that Capsule by providing a single point of connection for the hosts. 4.7. Capsule networking The communication between Satellite Server and hosts registered to a Capsule Server is routed through that Capsule Server. Capsule Server also provides Satellite services to hosts. Many of the services that Capsule Server manages use dedicated network ports. However, Capsule Server ensures that all communications from the host to Satellite Server use a single source IP address, which simplifies firewall administration. Satellite topology with hosts connecting to a Capsule In this topology, Capsule provides a single endpoint for all host network communications so that in remote network segments, only firewall ports to the Capsule itself must be open. Figure 4.1. How Satellite components interact when hosts connect to a Capsule Satellite topology with hosts connecting directly to Satellite Server In this topology, hosts connect to Satellite Server rather than a Capsule. This applies also to Capsules themselves because the Capsule Server is a host of Satellite Server. Figure 4.2. How Satellite components interact when hosts connect directly to Satellite Server Additional resources You can find complete instructions for configuring the host-based firewall to open the required ports in the following documents: Ports and Firewalls Requirements in Installing Satellite Server in a connected network environment Ports and Firewalls Requirements in Installing Satellite Server in a disconnected network environment Ports and Firewalls Requirements in Installing Capsule Server 4.8. Additional resources See Installing Capsule Server for details on Capsule Server requirements, installation, and scalability considerations. See Configuring Capsules with a load balancer for details on distributing load among Capsule Servers.
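To connect the Capsule feature sets described in Section 4.6 to practice: the selected features are enabled at installation time through satellite-installer options. The following is only a loose sketch of an infrastructure Capsule (DNS + DHCP + TFTP); the option names and the additional parameters each service requires (interfaces, zones, DHCP ranges, and so on) are assumptions here and must be taken from Installing Capsule Server for your Satellite version.

satellite-installer --scenario capsule \
  --foreman-proxy-dns true \
  --foreman-proxy-dhcp true \
  --foreman-proxy-tftp true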
null
https://docs.redhat.com/en/documentation/red_hat_satellite/6.16/html/overview_concepts_and_deployment_considerations/Major-Satellite-Components_planning
Chapter 104. KafkaUserTlsClientAuthentication schema reference
Chapter 104. KafkaUserTlsClientAuthentication schema reference Used in: KafkaUserSpec The type property is a discriminator that distinguishes use of the KafkaUserTlsClientAuthentication type from KafkaUserTlsExternalClientAuthentication and KafkaUserScramSha512ClientAuthentication . It must have the value tls for the type KafkaUserTlsClientAuthentication . Property Property type Description type string Must be tls .
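For context, a minimal sketch of a KafkaUser custom resource that uses this authentication type follows; the resource and cluster names are placeholders, and the apiVersion should be checked against the version installed with your Streams for Apache Kafka release.

apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaUser
metadata:
  name: my-user
  labels:
    strimzi.io/cluster: my-cluster
spec:
  authentication:
    type: tls

With type: tls set, the User Operator issues a client certificate for the user, and clients authenticate to the brokers over mutual TLS using that certificate.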
null
https://docs.redhat.com/en/documentation/red_hat_streams_for_apache_kafka/2.7/html/streams_for_apache_kafka_api_reference/type-KafkaUserTlsClientAuthentication-reference
Getting Started with AMQ Broker
Getting Started with AMQ Broker Red Hat AMQ Broker 7.10 For Use with AMQ Broker 7.10
null
https://docs.redhat.com/en/documentation/red_hat_amq_broker/7.10/html/getting_started_with_amq_broker/index
Chapter 3. Analyzing your projects with the MTR plugin
Chapter 3. Analyzing your projects with the MTR plugin You can analyze your projects with the MTR plugin by creating a run configuration and running an analysis. 3.1. Creating a run configuration You can create multiple run configurations to run against each project you import to IntelliJ IDEA. Procedure In the Projects view, click the project you want to analyze. On the left side of the screen, click the Migration Toolkit for Runtimes tab. If this is your first configuration, the run configuration panel is displayed on the right. If this is not your first configuration, right-click configuration in the list and select New configuration . The run configuration panel is displayed on the right. Complete the following configuration fields: cli : Enter the path to the cli executable. For example: USDHOME/mtr-cli-1.2.7.GA-redhat-00001/bin/mta-cli . Input : Click Add and enter the input file or directory. Target : Select one or more target migration paths. Note The location shown in the Output is set by the plugin. In the list of configurations, right-click the new configuration and select Run Analysis . The Console (MTR) terminal emulator opens, displaying information about the progress of the analysis. When the analysis is completed, you can click either Report or Results below the name of the configuration file you ran. Reports opens the MTR report, which describes any issues you need to address before you migrate or modernize your application. For more information, see Reviewing the reports in the CLI Guide . Results opens a directory displaying hints (issues) per application.
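For reference, an analysis equivalent to such a run configuration can also be started directly from the standalone CLI that the cli field points to. The following invocation is a sketch only; the input path, output directory, and target value are placeholders, and the available flags should be confirmed with the CLI help for your MTR version.

$HOME/mtr-cli-1.2.7.GA-redhat-00001/bin/mta-cli \
  --input /path/to/application.war \
  --output /path/to/output-directory \
  --target eap7

The plugin drives this CLI under the hood, which is why its run configuration asks for the cli path, the input, and the target.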
null
https://docs.redhat.com/en/documentation/migration_toolkit_for_runtimes/1.2/html/intellij_idea_plugin_guide/analyzing-projects-with-idea-plugin
Making open source more inclusive
Making open source more inclusive Red Hat is committed to replacing problematic language in our code, documentation, and web properties. We are beginning with these four terms: master, slave, blacklist, and whitelist. Because of the enormity of this endeavor, these changes will be implemented gradually over several upcoming releases. For more details, see our CTO Chris Wright's message .
null
https://docs.redhat.com/en/documentation/red_hat_openstack_platform/16.2/html/integrate_openstack_identity_with_external_user_management_services/making-open-source-more-inclusive
2.8. Performance Issues: Check the Red Hat Customer Portal
2.8. Performance Issues: Check the Red Hat Customer Portal For recommendations on deploying and upgrading Red Hat Enterprise Linux clusters that use the High Availability Add-On and Red Hat Global File System 2 (GFS2), see the article "Red Hat Enterprise Linux Cluster, High Availability, and GFS Deployment Best Practices" on the Red Hat Customer Portal at https://access.redhat.com/kb/docs/DOC-40821 .
null
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/7/html/global_file_system_2/s1-customer-portal
Chapter 4. Supported operating systems and architectures
Chapter 4. Supported operating systems and architectures .NET 6.0 is available on x86_64 , aarch64 , and s390x on Red Hat Enterprise Linux 8 and later. Table 4.1. Supported deployment environments for .NET 6.0 Platform Architecture RPM Repository Red Hat Enterprise Linux 7 (Until 30 June 2024 ) AMD64 and Intel 64 ( x86_64 ) rh-dotnet60 Red Hat Enterprise Linux 7 Server: rhel-7-server-dotnet-rpms Red Hat Enterprise Linux 7 Workstation: rhel-7-workstation-dotnet-rpms Red Hat Enterprise Linux 7 HPC: rhel-7-hpc-node-dotnet-rpms Red Hat Enterprise Linux 8 AMD64 and Intel 64 ( x86_64 ) IBM Z and LinuxONE ( s390x ) 64-bit Arm ( aarch64 ) dotnet-sdk-6.0 AppStream NOTE: The AppStream repositories are enabled by default in Red Hat Enterprise Linux 8. Red Hat Enterprise Atomic Host AMD64 and Intel 64 ( x86_64 ) OpenShift Container Platform 3.11 and later AMD64 and Intel 64 ( x86_64 ) OpenShift Container Platform 4.2 and later IBM Z and LinuxONE ( s390x )
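To make the table concrete, installing the SDK from these repositories typically looks like the following sketch; it assumes the system is already registered and entitled to the repository shown for its platform.

On Red Hat Enterprise Linux 8 and later (AppStream, enabled by default):

dnf install dotnet-sdk-6.0

On Red Hat Enterprise Linux 7 (software collection):

subscription-manager repos --enable rhel-7-server-dotnet-rpms
yum install rh-dotnet60
scl enable rh-dotnet60 bash

The scl enable step opens a shell in which the dotnet command from the rh-dotnet60 collection is on the PATH.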
null
https://docs.redhat.com/en/documentation/net/6.0/html/release_notes_for_.net_6.0_rpm_packages/supported-operating-systems-and-architecture_release-notes-for-dotnet-rpms
Chapter 2. HA Solutions for SAP HANA
Chapter 2. HA Solutions for SAP HANA 2.1. Automated SAP HANA System Replication SAP HANA System Replication (HSR) is a built-in high availability and disaster recovery feature to support business continuity. With HANA System Replication, it's possible to copy and continuously synchronize a SAP HANA database to one or more locations. Data is constantly pre-loaded on the secondary system to minimize recovery time objective (RTO). However, SAP HANA does not contain any mechanism to automatically trigger a failover in case an issue occurs with any components that are part of a HANA System Replication setup. But 3rd party cluster solutions can be used to monitor the healthiness of the HANA System Replication environment and trigger failover when a failure is detected. On RHEL the Red Hat Enterprise Linux HA Add-On can be used to automate the failover. Red Hat provides HA solutions for both single system SAP HANA setups (Scale-up) or scaleable multi-system SAP HANA setups ( Scale-out). 2.2. Supported Scenarios for Automated SAP HANA Scale-Up System Replication Supported Scenario Notes Performance Optimized Secondary site is not active for client/application servers Cost Optimized Support a QA/Test instance running on the secondary site (Cost-Optimized); QA/Test instance will be shutdown first during the failover of Prod Active/Active (Read Enabled) The secondary HANA instance can take read-only inquiries Multitier System Replication Multitier System Replication is possible, but the tertiary site cannot be managed by the cluster Multitarget System Replication In addition to the standard HANA System Replication the data is replicated to additional secondary HANA instances that are not managed by the cluster 2.2.1. Support Policies Please refer to Support Policies for RHEL High Availability Clusters - Management of SAP HANA in a Cluster . 2.2.2. Performance Optimized In the Performance Optimized scenario, the secondary HANA database is configured to preload the tables into memory, thus the takeover time is normally very fast. However, as the secondary HANA database is dedicated to System Replication and doesn't accept client inquiries, this setup is expensive in terms of hardware cost. 2.2.2.1. Configuration Guides On-Premise: Automating SAP HANA Scale-Up System Replication using the RHEL HA Add-On AWS: Configuring SAP HANA Scale-Up System Replication with the RHEL HA Add-On on Amazon Web Services (AWS) Azure: High availability of SAP HANA on Azure VMs on Red Hat Enterprise Linux Google Cloud Platform (GCP): HA cluster configuration guide for SAP HANA on RHEL IBM Power System Virtual Server: Configure SAP HANA Scale-Up System Replication in a RHEL HA Add-On cluster 2.2.3. Cost Optimized The Cost Optimized scenario supports an additional TEST/QA HANA database on the secondary site, serving client inquiries. Because hardware resources have to be allocated to the TEST/QA instance, the Production HANA database can not be preloaded. Before takeover, the TEST/QA instance has to be shutdown first to free up the hardware resources assigned to it and reassign them to the secondary HANA instance that will be promoted to become the primary instance. The takeover time is therefore longer than for Performance Optimized setups. See also Automating Cost-Optimized SAP HANA Scale-Up System Replication using the RHEL HA Add-On . 2.2.4. Active/Active(Read Enabled) The secondary HANA instance can take read-only inquiries. This setup supports a second virtual IP on the secondary site. 
For more information, please refer to Adding a secondary virtual IP address for an Active/Active (Read-Enabled) HANA System Replication setup. 2.2.5. Multitier System Replication Multitier System Replication is possible, but the tertiary site cannot be managed by the cluster. A takeover to the tertiary site has to be triggered manually, and if the environment needs to be brought back to its original state after a manual takeover to the tertiary site, all steps to reconfigure the HANA System Replication setup also have to be carried out manually while the cluster is disabled. After it has been verified that the HANA System Replication setup is working correctly again on the HANA instances that should be managed by the cluster, the cluster can be reactivated. 2.2.6. Multitarget System Replication When using HANA 2.0 SPS 04 or newer and a RHEL release that provides version 0.162.1 or newer of the resource-agents-sap-hana RPM package, Multitarget System Replication is supported for HANA Scale-Up System Replication setups managed by the RHEL HA Add-On. In a Scale-Up Multitarget System Replication HA cluster setup, the primary HANA instance is replicated to a secondary HANA instance managed by the HA cluster and to additional secondary HANA instances not managed by the cluster to meet additional availability requirements. 2.2.6.1. Configuration Guide On-Premise: Configuring SAP HANA Scale-Up Multitarget System Replication for disaster recovery 2.3. Supported Scenarios for Automated SAP HANA Scale-Out System Replication Supported Scenario Description Performance Optimized Secondary site is not active for client/application servers Active/Active (Read Enabled) The secondary HANA instance can take read-only inquiries Multitarget System Replication Primary HANA instance is replicated to multiple secondary HANA instances 2.3.1. Support Policies Please refer to Support Policies for RHEL High Availability Clusters - Management of SAP HANA in a Cluster. 2.3.2. Configuration Guides for Performance Optimized HANA Scale-Out System Replication HA setups On-Premise: Red Hat Enterprise Linux HA Solution for SAP HANA Scale Out and System Replication AWS: Configuring SAP HANA Scale-Out System Replication with the RHEL HA Add-On on Amazon Web Services (AWS) Azure: High availability of SAP HANA scale-out system on Red Hat Enterprise Linux 2.3.3. Active/Active (Read Enabled) HANA Scale-Out System Replication In HANA 2.0, the secondary instance can take read-only inquiries. This setup supports a second virtual IP on the secondary site. For more details, please check the chapter "Adding a secondary virtual IP address resource for Active/Active (Read-Enabled) setup" in Red Hat Enterprise Linux HA Solution for SAP HANA Scale Out and System Replication. For more information, see Active/Active (Read-Enabled). 2.3.4. Multitarget System Replication (Scale-Out) Starting with HANA 2.0 SPS 04, Multitarget System Replication is supported in a cluster environment. The primary site is replicated to a secondary site and also to an additional secondary site to meet additional availability requirements. In case of a failure, this additional third site is automatically registered to the new primary site, which was the former secondary. For more details, refer to Multitarget System Replication.
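To close the chapter with a concrete example, the following is a minimal sketch of the core Pacemaker resources that the RHEL HA Add-On uses to automate a Performance Optimized Scale-Up setup (section 2.2.2); the SID (RH1), instance number (02), and parameter values such as AUTOMATED_REGISTER are placeholder assumptions, and the configuration guides listed above remain the authoritative reference:

# Clone that monitors the HANA System Replication topology on both cluster nodes
$ pcs resource create SAPHanaTopology_RH1_02 SAPHanaTopology SID=RH1 InstanceNumber=02 \
    clone clone-max=2 clone-node-max=1 interleave=true
# Promotable clone that tracks the primary/secondary roles and performs the takeover
$ pcs resource create SAPHana_RH1_02 SAPHana SID=RH1 InstanceNumber=02 \
    PREFER_SITE_TAKEOVER=true DUPLICATE_PRIMARY_TIMEOUT=7200 AUTOMATED_REGISTER=false \
    promotable notify=true clone-max=2 clone-node-max=1 interleave=true
# Start the topology clone before the SAPHana promotable clone
$ pcs constraint order SAPHanaTopology_RH1_02-clone then SAPHana_RH1_02-clone symmetrical=false

A virtual IP resource colocated with the promoted (primary) instance, plus fencing, completes such a setup; the details vary per platform and are covered in the linked guides.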
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux_for_sap_solutions/9/html/red_hat_ha_solutions_for_sap_hana_s4hana_and_netweaver_based_sap_applications/asmb_sh_ha_sol_for_hana_ha-sol-hana-netweaver-9
Chapter 3. Setting up and configuring the registry
Chapter 3. Setting up and configuring the registry 3.1. Configuring the registry for AWS user-provisioned infrastructure 3.1.1. Configuring a secret for the Image Registry Operator In addition to the configs.imageregistry.operator.openshift.io and ConfigMap resources, configuration is provided to the Operator by a separate secret resource located within the openshift-image-registry namespace. The image-registry-private-configuration-user secret provides credentials needed for storage access and management. It overrides the default credentials used by the Operator, if default credentials were found. For S3 on AWS storage, the secret is expected to contain two keys: REGISTRY_STORAGE_S3_ACCESSKEY REGISTRY_STORAGE_S3_SECRETKEY Procedure Create an OpenShift Container Platform secret that contains the required keys. USD oc create secret generic image-registry-private-configuration-user --from-literal=REGISTRY_STORAGE_S3_ACCESSKEY=myaccesskey --from-literal=REGISTRY_STORAGE_S3_SECRETKEY=mysecretkey --namespace openshift-image-registry 3.1.2. Configuring registry storage for AWS with user-provisioned infrastructure During installation, your cloud credentials are sufficient to create an Amazon S3 bucket and the Registry Operator will automatically configure storage. If the Registry Operator cannot create an S3 bucket and automatically configure storage, you can create an S3 bucket and configure storage with the following procedure. Prerequisites You have a cluster on AWS with user-provisioned infrastructure. For Amazon S3 storage, the secret is expected to contain two keys: REGISTRY_STORAGE_S3_ACCESSKEY REGISTRY_STORAGE_S3_SECRETKEY Procedure Use the following procedure if the Registry Operator cannot create an S3 bucket and automatically configure storage. Set up a Bucket Lifecycle Policy to abort incomplete multipart uploads that are one day old. Fill in the storage configuration in configs.imageregistry.operator.openshift.io/cluster : USD oc edit configs.imageregistry.operator.openshift.io/cluster Example configuration storage: s3: bucket: <bucket-name> region: <region-name> Warning To secure your registry images in AWS, block public access to the S3 bucket. 3.1.3. Image Registry Operator configuration parameters for AWS S3 The following configuration parameters are available for AWS S3 registry storage. The image registry spec.storage.s3 configuration parameter holds the information to configure the registry to use the AWS S3 service for back-end storage. See the S3 storage driver documentation for more information. Parameter Description bucket Bucket is the bucket name in which you want to store the registry's data. It is optional and is generated if not provided. chunkSizeMiB ChunkSizeMiB is the size of the multipart upload chunks of the S3 API. The default is 10 MiB with a minimum of 5 MiB. region Region is the AWS region in which your bucket exists. It is optional and is set based on the installed AWS Region. regionEndpoint RegionEndpoint is the endpoint for S3 compatible storage services. It is optional and defaults based on the Region that is provided. virtualHostedStyle VirtualHostedStyle enables using S3 virtual hosted style bucket paths with a custom RegionEndpoint. It is optional and defaults to false. Set this parameter to deploy OpenShift Container Platform to hidden regions. encrypt Encrypt specifies whether or not the registry stores the image in encrypted format. It is optional and defaults to false. keyID KeyID is the KMS key ID to use for encryption. It is optional. 
Encrypt must be true, or this parameter is ignored. cloudFront CloudFront configures Amazon Cloudfront as the storage middleware in a registry. It is optional. trustedCA The namespace for the config map referenced by trustedCA is openshift-config . The key for the bundle in the config map is ca-bundle.crt . It is optional. Note When the value of the regionEndpoint parameter is configured to a URL of a Rados Gateway, an explicit port must not be specified. For example: regionEndpoint: http://rook-ceph-rgw-ocs-storagecluster-cephobjectstore.openshift-storage.svc.cluster.local 3.2. Configuring the registry for GCP user-provisioned infrastructure 3.2.1. Configuring a secret for the Image Registry Operator In addition to the configs.imageregistry.operator.openshift.io and ConfigMap resources, configuration is provided to the Operator by a separate secret resource located within the openshift-image-registry namespace. The image-registry-private-configuration-user secret provides credentials needed for storage access and management. It overrides the default credentials used by the Operator, if default credentials were found. For GCS on GCP storage, the secret is expected to contain one key whose value is the contents of a credentials file provided by GCP: REGISTRY_STORAGE_GCS_KEYFILE Procedure Create an OpenShift Container Platform secret that contains the required keys. USD oc create secret generic image-registry-private-configuration-user --from-file=REGISTRY_STORAGE_GCS_KEYFILE=<path_to_keyfile> --namespace openshift-image-registry 3.2.2. Configuring the registry storage for GCP with user-provisioned infrastructure If the Registry Operator cannot create a Google Cloud Platform (GCP) bucket, you must set up the storage medium manually and configure the settings in the registry custom resource (CR). Prerequisites A cluster on GCP with user-provisioned infrastructure. To configure registry storage for GCP, you need to provide Registry Operator cloud credentials. For GCS on GCP storage, the secret is expected to contain one key whose value is the contents of a credentials file provided by GCP: REGISTRY_STORAGE_GCS_KEYFILE Procedure Set up an Object Lifecycle Management policy to abort incomplete multipart uploads that are one day old. Fill in the storage configuration in configs.imageregistry.operator.openshift.io/cluster : USD oc edit configs.imageregistry.operator.openshift.io/cluster Example configuration # ... storage: gcs: bucket: <bucket-name> projectID: <project-id> region: <region-name> # ... Warning You can secure your registry images that use a Google Cloud Storage bucket by setting public access prevention . 3.2.3. Image Registry Operator configuration parameters for GCP GCS The following configuration parameters are available for GCP GCS registry storage. Parameter Description bucket Bucket is the bucket name in which you want to store the registry's data. It is optional and is generated if not provided. region Region is the GCS location in which your bucket exists. It is optional and is set based on the installed GCS Region. projectID ProjectID is the Project ID of the GCP project that this bucket should be associated with. It is optional. keyID KeyID is the KMS key ID to use for encryption. It is optional because buckets are encrypted by default on GCP. This allows for the use of a custom encryption key. 3.3. 
Configuring the registry for OpenStack user-provisioned infrastructure You can configure the registry of a cluster that runs on your own Red Hat OpenStack Platform (RHOSP) infrastructure. 3.3.1. Configuring Image Registry Operator redirects By disabling redirects, you can configure the Image Registry Operator to control whether clients such as OpenShift Container Platform cluster builds or external systems like developer machines are redirected to pull images directly from Red Hat OpenStack Platform (RHOSP) Swift storage. This configuration is optional and depends on whether the clients trust the storage's SSL/TLS certificates. Note In situations where clients do not trust the storage certificate, you can set the disableRedirect option to true so that traffic is proxied through the image registry. As a consequence, however, the image registry might require more resources, especially network bandwidth, to handle the increased load. Alternatively, if clients trust the storage certificate, the registry can allow redirects. This reduces resource demand on the registry itself. Some users might prefer to configure their clients to trust their self-signed certificate authorities (CAs) instead of disabling redirects. If you are using a self-signed CA, you must decide between trusting the custom CAs or disabling redirects. Procedure To ensure that the image registry proxies traffic instead of relying on Swift storage, change the value of the spec.disableRedirect field in the config.imageregistry object to true by running the following command: $ oc patch configs.imageregistry.operator.openshift.io cluster --type merge --patch '{"spec":{"disableRedirect":true}}' 3.3.2. Configuring a secret for the Image Registry Operator In addition to the configs.imageregistry.operator.openshift.io and ConfigMap resources, configuration is provided to the Operator by a separate secret resource located within the openshift-image-registry namespace. The image-registry-private-configuration-user secret provides credentials needed for storage access and management. It overrides the default credentials used by the Operator, if default credentials were found. For Swift on Red Hat OpenStack Platform (RHOSP) storage, the secret is expected to contain the following two keys: REGISTRY_STORAGE_SWIFT_USERNAME REGISTRY_STORAGE_SWIFT_PASSWORD Procedure Create an OpenShift Container Platform secret that contains the required keys. $ oc create secret generic image-registry-private-configuration-user --from-literal=REGISTRY_STORAGE_SWIFT_USERNAME=<username> --from-literal=REGISTRY_STORAGE_SWIFT_PASSWORD=<password> -n openshift-image-registry 3.3.3. Registry storage for RHOSP with user-provisioned infrastructure If the Registry Operator cannot create a Swift bucket, you must set up the storage medium manually and configure the settings in the registry custom resource (CR). Prerequisites A cluster on Red Hat OpenStack Platform (RHOSP) with user-provisioned infrastructure. To configure registry storage for RHOSP, you need to provide Registry Operator cloud credentials. For Swift on RHOSP storage, the secret is expected to contain the following two keys: REGISTRY_STORAGE_SWIFT_USERNAME REGISTRY_STORAGE_SWIFT_PASSWORD Procedure Fill in the storage configuration in configs.imageregistry.operator.openshift.io/cluster: $ oc edit configs.imageregistry.operator.openshift.io/cluster Example configuration # ... storage: swift: container: <container-id> # ... 3.3.4.
Image Registry Operator configuration parameters for RHOSP Swift The following configuration parameters are available for Red Hat OpenStack Platform (RHOSP) Swift registry storage. Parameter Description authURL Defines the URL for obtaining the authentication token. This value is optional. authVersion Specifies the Auth version of RHOSP, for example, authVersion: "3" . This value is optional. container Defines the name of a Swift container for storing registry data. This value is optional. domain Specifies the RHOSP domain name for the Identity v3 API. This value is optional. domainID Specifies the RHOSP domain ID for the Identity v3 API. This value is optional. tenant Defines the RHOSP tenant name to be used by the registry. This value is optional. tenantID Defines the RHOSP tenant ID to be used by the registry. This value is optional. regionName Defines the RHOSP region in which the container exists. This value is optional. 3.4. Configuring the registry for Azure user-provisioned infrastructure 3.4.1. Configuring a secret for the Image Registry Operator In addition to the configs.imageregistry.operator.openshift.io and ConfigMap resources, configuration is provided to the Operator by a separate secret resource located within the openshift-image-registry namespace. The image-registry-private-configuration-user secret provides credentials needed for storage access and management. It overrides the default credentials used by the Operator, if default credentials were found. For Azure registry storage, the secret is expected to contain one key whose value is the contents of a credentials file provided by Azure: REGISTRY_STORAGE_AZURE_ACCOUNTKEY Procedure Create an OpenShift Container Platform secret that contains the required key. USD oc create secret generic image-registry-private-configuration-user --from-literal=REGISTRY_STORAGE_AZURE_ACCOUNTKEY=<accountkey> --namespace openshift-image-registry 3.4.2. Configuring registry storage for Azure During installation, your cloud credentials are sufficient to create Azure Blob Storage, and the Registry Operator automatically configures storage. Prerequisites A cluster on Azure with user-provisioned infrastructure. To configure registry storage for Azure, provide Registry Operator cloud credentials. For Azure storage the secret is expected to contain one key: REGISTRY_STORAGE_AZURE_ACCOUNTKEY Procedure Create an Azure storage container . Fill in the storage configuration in configs.imageregistry.operator.openshift.io/cluster : USD oc edit configs.imageregistry.operator.openshift.io/cluster Example configuration storage: azure: accountName: <storage-account-name> container: <container-name> 3.4.3. Configuring registry storage for Azure Government During installation, your cloud credentials are sufficient to create Azure Blob Storage, and the Registry Operator automatically configures storage. Prerequisites A cluster on Azure with user-provisioned infrastructure in a government region. To configure registry storage for Azure, provide Registry Operator cloud credentials. For Azure storage, the secret is expected to contain one key: REGISTRY_STORAGE_AZURE_ACCOUNTKEY Procedure Create an Azure storage container . 
Fill in the storage configuration in configs.imageregistry.operator.openshift.io/cluster : USD oc edit configs.imageregistry.operator.openshift.io/cluster Example configuration storage: azure: accountName: <storage-account-name> container: <container-name> cloudName: AzureUSGovernmentCloud 1 1 cloudName is the name of the Azure cloud environment, which can be used to configure the Azure SDK with the appropriate Azure API endpoints. Defaults to AzurePublicCloud . You can also set cloudName to AzureUSGovernmentCloud , AzureChinaCloud , or AzureGermanCloud with sufficient credentials. 3.5. Configuring the registry for RHOSP 3.5.1. Configuring an image registry with custom storage on clusters that run on RHOSP After you install a cluster on Red Hat OpenStack Platform (RHOSP), you can use a Cinder volume that is in a specific availability zone for registry storage. Procedure Create a YAML file that specifies the storage class and availability zone to use. For example: apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: name: custom-csi-storageclass provisioner: cinder.csi.openstack.org volumeBindingMode: WaitForFirstConsumer allowVolumeExpansion: true parameters: availability: <availability_zone_name> Note OpenShift Container Platform does not verify the existence of the availability zone you choose. Verify the name of the availability zone before you apply the configuration. From a command line, apply the configuration: USD oc apply -f <storage_class_file_name> Example output storageclass.storage.k8s.io/custom-csi-storageclass created Create a YAML file that specifies a persistent volume claim (PVC) that uses your storage class and the openshift-image-registry namespace. For example: apiVersion: v1 kind: PersistentVolumeClaim metadata: name: csi-pvc-imageregistry namespace: openshift-image-registry 1 annotations: imageregistry.openshift.io: "true" spec: accessModes: - ReadWriteOnce volumeMode: Filesystem resources: requests: storage: 100Gi 2 storageClassName: <your_custom_storage_class> 3 1 Enter the namespace openshift-image-registry . This namespace allows the Cluster Image Registry Operator to consume the PVC. 2 Optional: Adjust the volume size. 3 Enter the name of the storage class that you created. From a command line, apply the configuration: USD oc apply -f <pvc_file_name> Example output persistentvolumeclaim/csi-pvc-imageregistry created Replace the original persistent volume claim in the image registry configuration with the new claim: USD oc patch configs.imageregistry.operator.openshift.io/cluster --type 'json' -p='[{"op": "replace", "path": "/spec/storage/pvc/claim", "value": "csi-pvc-imageregistry"}]' Example output config.imageregistry.operator.openshift.io/cluster patched Over the several minutes, the configuration is updated. Verification To confirm that the registry is using the resources that you defined: Verify that the PVC claim value is identical to the name that you provided in your PVC definition: USD oc get configs.imageregistry.operator.openshift.io/cluster -o yaml Example output ... status: ... managementState: Managed pvc: claim: csi-pvc-imageregistry ... Verify that the status of the PVC is Bound : USD oc get pvc -n openshift-image-registry csi-pvc-imageregistry Example output NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE csi-pvc-imageregistry Bound pvc-72a8f9c9-f462-11e8-b6b6-fa163e18b7b5 100Gi RWO custom-csi-storageclass 11m 3.6. Configuring the registry for bare metal 3.6.1. 
Image registry removed during installation On platforms that do not provide shareable object storage, the OpenShift Image Registry Operator bootstraps itself as Removed. This allows openshift-installer to complete installations on these platform types. After installation, you must edit the Image Registry Operator configuration to switch the managementState from Removed to Managed. When this has completed, you must configure storage. 3.6.2. Changing the image registry's management state To start the image registry, you must change the Image Registry Operator configuration's managementState from Removed to Managed. Procedure Change the managementState of the Image Registry Operator configuration from Removed to Managed. For example: $ oc patch configs.imageregistry.operator.openshift.io cluster --type merge --patch '{"spec":{"managementState":"Managed"}}' 3.6.3. Image registry storage configuration The Image Registry Operator is not initially available for platforms that do not provide default storage. After installation, you must configure your registry to use storage so that the Registry Operator is made available. Instructions are shown for configuring a persistent volume, which is required for production clusters. Where applicable, instructions are shown for configuring an empty directory as the storage location, which is available for only non-production clusters. Additional instructions are provided for allowing the image registry to use block storage types by using the Recreate rollout strategy during upgrades. 3.6.3.1. Configuring registry storage for bare metal and other manual installations As a cluster administrator, following installation you must configure your registry to use storage. Prerequisites You have access to the cluster as a user with the cluster-admin role. You have a cluster that uses manually provisioned Red Hat Enterprise Linux CoreOS (RHCOS) nodes, such as bare metal. You have provisioned persistent storage for your cluster, such as Red Hat OpenShift Data Foundation. Important OpenShift Container Platform supports ReadWriteOnce access for image registry storage when you have only one replica. ReadWriteOnce access also requires that the registry uses the Recreate rollout strategy. To deploy an image registry that supports high availability with two or more replicas, ReadWriteMany access is required. You must have 100Gi capacity. Procedure To configure your registry to use storage, change the spec.storage.pvc in the configs.imageregistry/cluster resource. Note When you use shared storage, review your security settings to prevent outside access. Verify that you do not have a registry pod: $ oc get pod -n openshift-image-registry -l docker-registry=default Example output No resources found in openshift-image-registry namespace Note If you do have a registry pod in your output, you do not need to continue with this procedure. Check the registry configuration: $ oc edit configs.imageregistry.operator.openshift.io Example output storage: pvc: claim: Leave the claim field blank to allow the automatic creation of an image-registry-storage PVC. Check the clusteroperator status: $ oc get clusteroperator image-registry Example output NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE MESSAGE image-registry 4.18 True False False 6h50m Ensure that your registry is set to managed to enable building and pushing of images. Run: $ oc edit configs.imageregistry.operator.openshift.io Then, change the line managementState: Removed to managementState: Managed 3.6.3.2.
Configuring storage for the image registry in non-production clusters You must configure storage for the Image Registry Operator. For non-production clusters, you can set the image registry to an empty directory. If you do so, all images are lost if you restart the registry. Procedure To set the image registry storage to an empty directory: USD oc patch configs.imageregistry.operator.openshift.io cluster --type merge --patch '{"spec":{"storage":{"emptyDir":{}}}}' Warning Configure this option for only non-production clusters. If you run this command before the Image Registry Operator initializes its components, the oc patch command fails with the following error: Error from server (NotFound): configs.imageregistry.operator.openshift.io "cluster" not found Wait a few minutes and run the command again. 3.6.3.3. Configuring block registry storage for bare metal To allow the image registry to use block storage types during upgrades as a cluster administrator, you can use the Recreate rollout strategy. Important Block storage volumes, or block persistent volumes, are supported but not recommended for use with the image registry on production clusters. An installation where the registry is configured on block storage is not highly available because the registry cannot have more than one replica. If you choose to use a block storage volume with the image registry, you must use a filesystem persistent volume claim (PVC). Procedure Enter the following command to set the image registry storage as a block storage type, patch the registry so that it uses the Recreate rollout strategy, and runs with only one ( 1 ) replica: USD oc patch config.imageregistry.operator.openshift.io/cluster --type=merge -p '{"spec":{"rolloutStrategy":"Recreate","replicas":1}}' Provision the PV for the block storage device, and create a PVC for that volume. The requested block volume uses the ReadWriteOnce (RWO) access mode. Create a pvc.yaml file with the following contents to define a VMware vSphere PersistentVolumeClaim object: kind: PersistentVolumeClaim apiVersion: v1 metadata: name: image-registry-storage 1 namespace: openshift-image-registry 2 spec: accessModes: - ReadWriteOnce 3 resources: requests: storage: 100Gi 4 1 A unique name that represents the PersistentVolumeClaim object. 2 The namespace for the PersistentVolumeClaim object, which is openshift-image-registry . 3 The access mode of the persistent volume claim. With ReadWriteOnce , the volume can be mounted with read and write permissions by a single node. 4 The size of the persistent volume claim. Enter the following command to create the PersistentVolumeClaim object from the file: USD oc create -f pvc.yaml -n openshift-image-registry Enter the following command to edit the registry configuration so that it references the correct PVC: USD oc edit config.imageregistry.operator.openshift.io -o yaml Example output storage: pvc: claim: 1 1 By creating a custom PVC, you can leave the claim field blank for the default automatic creation of an image-registry-storage PVC. 3.6.3.4. Configuring the Image Registry Operator to use Ceph RGW storage with Red Hat OpenShift Data Foundation Red Hat OpenShift Data Foundation integrates multiple storage types that you can use with the OpenShift image registry: Ceph, a shared and distributed file system and on-premises object storage NooBaa, providing a Multicloud Object Gateway This document outlines the procedure to configure the image registry to use Ceph RGW storage. 
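Before starting this procedure or the NooBaa and CephFS procedures that follow, it can help to confirm that the expected storage classes exist on the cluster. A quick, illustrative check, assuming the default class names used in this chapter (ocs-storagecluster-ceph-rgw, openshift-storage.noobaa.io, and ocs-storagecluster-cephfs):

# List the storage classes provided by the OpenShift Data Foundation deployment
$ oc get storageclass | grep -E 'ceph|noobaa'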
Prerequisites You have access to the cluster as a user with the cluster-admin role. You have access to the OpenShift Container Platform web console. You installed the oc CLI. You installed the OpenShift Data Foundation Operator to provide object storage and Ceph RGW object storage. Procedure Create the object bucket claim using the ocs-storagecluster-ceph-rgw storage class. For example: cat <<EOF | oc apply -f - apiVersion: objectbucket.io/v1alpha1 kind: ObjectBucketClaim metadata: name: rgwbucket namespace: openshift-storage 1 spec: storageClassName: ocs-storagecluster-ceph-rgw generateBucketName: rgwbucket EOF 1 Alternatively, you can use the openshift-image-registry namespace. Get the bucket name by entering the following command: USD bucket_name=USD(oc get obc -n openshift-storage rgwbucket -o jsonpath='{.spec.bucketName}') Get the AWS credentials by entering the following commands: USD AWS_ACCESS_KEY_ID=USD(oc get secret -n openshift-storage rgwbucket -o jsonpath='{.data.AWS_ACCESS_KEY_ID}' | base64 --decode) USD AWS_SECRET_ACCESS_KEY=USD(oc get secret -n openshift-storage rgwbucket -o jsonpath='{.data.AWS_SECRET_ACCESS_KEY}' | base64 --decode) Create the secret image-registry-private-configuration-user with the AWS credentials for the new bucket under openshift-image-registry project by entering the following command: USD oc create secret generic image-registry-private-configuration-user --from-literal=REGISTRY_STORAGE_S3_ACCESSKEY=USD{AWS_ACCESS_KEY_ID} --from-literal=REGISTRY_STORAGE_S3_SECRETKEY=USD{AWS_SECRET_ACCESS_KEY} --namespace openshift-image-registry Get the route host by entering the following command: USD route_host=USD(oc get route ocs-storagecluster-cephobjectstore -n openshift-storage --template='{{ .spec.host }}') Create a config map that uses an ingress certificate by entering the following commands: USD oc extract secret/USD(oc get ingresscontroller -n openshift-ingress-operator default -o json | jq '.spec.defaultCertificate.name // "router-certs-default"' -r) -n openshift-ingress --confirm USD oc create configmap image-registry-s3-bundle --from-file=ca-bundle.crt=./tls.crt -n openshift-config Configure the image registry to use the Ceph RGW object storage by entering the following command: USD oc patch config.image/cluster -p '{"spec":{"managementState":"Managed","replicas":2,"storage":{"managementState":"Unmanaged","s3":{"bucket":'\"USD{bucket_name}\"',"region":"us-east-1","regionEndpoint":'\"https://USD{route_host}\"',"virtualHostedStyle":false,"encrypt":false,"trustedCA":{"name":"image-registry-s3-bundle"}}}}}' --type=merge 3.6.3.5. Configuring the Image Registry Operator to use Noobaa storage with Red Hat OpenShift Data Foundation Red Hat OpenShift Data Foundation integrates multiple storage types that you can use with the OpenShift image registry: Ceph, a shared and distributed file system and on-premises object storage NooBaa, providing a Multicloud Object Gateway This document outlines the procedure to configure the image registry to use Noobaa storage. Prerequisites You have access to the cluster as a user with the cluster-admin role. You have access to the OpenShift Container Platform web console. You installed the oc CLI. You installed the OpenShift Data Foundation Operator to provide object storage and Noobaa object storage. Procedure Create the object bucket claim using the openshift-storage.noobaa.io storage class. 
For example: cat <<EOF | oc apply -f - apiVersion: objectbucket.io/v1alpha1 kind: ObjectBucketClaim metadata: name: noobaatest namespace: openshift-storage 1 spec: storageClassName: openshift-storage.noobaa.io generateBucketName: noobaatest EOF 1 Alternatively, you can use the openshift-image-registry namespace. Get the bucket name by entering the following command: USD bucket_name=USD(oc get obc -n openshift-storage noobaatest -o jsonpath='{.spec.bucketName}') Get the AWS credentials by entering the following commands: USD AWS_ACCESS_KEY_ID=USD(oc get secret -n openshift-storage noobaatest -o yaml | grep -w "AWS_ACCESS_KEY_ID:" | head -n1 | awk '{print USD2}' | base64 --decode) USD AWS_SECRET_ACCESS_KEY=USD(oc get secret -n openshift-storage noobaatest -o yaml | grep -w "AWS_SECRET_ACCESS_KEY:" | head -n1 | awk '{print USD2}' | base64 --decode) Create the secret image-registry-private-configuration-user with the AWS credentials for the new bucket under openshift-image-registry project by entering the following command: USD oc create secret generic image-registry-private-configuration-user --from-literal=REGISTRY_STORAGE_S3_ACCESSKEY=USD{AWS_ACCESS_KEY_ID} --from-literal=REGISTRY_STORAGE_S3_SECRETKEY=USD{AWS_SECRET_ACCESS_KEY} --namespace openshift-image-registry Get the route host by entering the following command: USD route_host=USD(oc get route s3 -n openshift-storage -o=jsonpath='{.spec.host}') Create a config map that uses an ingress certificate by entering the following commands: USD oc extract secret/USD(oc get ingresscontroller -n openshift-ingress-operator default -o json | jq '.spec.defaultCertificate.name // "router-certs-default"' -r) -n openshift-ingress --confirm USD oc create configmap image-registry-s3-bundle --from-file=ca-bundle.crt=./tls.crt -n openshift-config Configure the image registry to use the Nooba object storage by entering the following command: USD oc patch config.image/cluster -p '{"spec":{"managementState":"Managed","replicas":2,"storage":{"managementState":"Unmanaged","s3":{"bucket":'\"USD{bucket_name}\"',"region":"us-east-1","regionEndpoint":'\"https://USD{route_host}\"',"virtualHostedStyle":false,"encrypt":false,"trustedCA":{"name":"image-registry-s3-bundle"}}}}}' --type=merge 3.6.4. Configuring the Image Registry Operator to use CephFS storage with Red Hat OpenShift Data Foundation Red Hat OpenShift Data Foundation integrates multiple storage types that you can use with the OpenShift image registry: Ceph, a shared and distributed file system and on-premises object storage NooBaa, providing a Multicloud Object Gateway This document outlines the procedure to configure the image registry to use CephFS storage. Note CephFS uses persistent volume claim (PVC) storage. It is not recommended to use PVCs for image registry storage if there are other options are available, such as Ceph RGW or Noobaa. Prerequisites You have access to the cluster as a user with the cluster-admin role. You have access to the OpenShift Container Platform web console. You installed the oc CLI. You installed the OpenShift Data Foundation Operator to provide object storage and CephFS file storage. Procedure Create a PVC to use the cephfs storage class. 
For example: cat <<EOF | oc apply -f - apiVersion: v1 kind: PersistentVolumeClaim metadata: name: registry-storage-pvc namespace: openshift-image-registry spec: accessModes: - ReadWriteMany resources: requests: storage: 100Gi storageClassName: ocs-storagecluster-cephfs EOF Configure the image registry to use the CephFS file system storage by entering the following command: USD oc patch config.image/cluster -p '{"spec":{"managementState":"Managed","replicas":2,"storage":{"managementState":"Unmanaged","pvc":{"claim":"registry-storage-pvc"}}}}' --type=merge 3.6.5. Additional resources Recommended configurable storage technology Configuring Image Registry to use OpenShift Data Foundation 3.7. Configuring the registry for vSphere 3.7.1. Image registry removed during installation On platforms that do not provide shareable object storage, the OpenShift Image Registry Operator bootstraps itself as Removed . This allows openshift-installer to complete installations on these platform types. After installation, you must edit the Image Registry Operator configuration to switch the managementState from Removed to Managed . When this has completed, you must configure storage. 3.7.2. Changing the image registry's management state To start the image registry, you must change the Image Registry Operator configuration's managementState from Removed to Managed . Procedure Change managementState Image Registry Operator configuration from Removed to Managed . For example: USD oc patch configs.imageregistry.operator.openshift.io cluster --type merge --patch '{"spec":{"managementState":"Managed"}}' 3.7.3. Image registry storage configuration The Image Registry Operator is not initially available for platforms that do not provide default storage. After installation, you must configure your registry to use storage so that the Registry Operator is made available. Instructions are shown for configuring a persistent volume, which is required for production clusters. Where applicable, instructions are shown for configuring an empty directory as the storage location, which is available for only non-production clusters. Additional instructions are provided for allowing the image registry to use block storage types by using the Recreate rollout strategy during upgrades. 3.7.3.1. Configuring registry storage for VMware vSphere As a cluster administrator, following installation you must configure your registry to use storage. Prerequisites Cluster administrator permissions. A cluster on VMware vSphere. Persistent storage provisioned for your cluster, such as Red Hat OpenShift Data Foundation. Important OpenShift Container Platform supports ReadWriteOnce access for image registry storage when you have only one replica. ReadWriteOnce access also requires that the registry uses the Recreate rollout strategy. To deploy an image registry that supports high availability with two or more replicas, ReadWriteMany access is required. Must have "100Gi" capacity. Important Testing shows issues with using the NFS server on RHEL as storage backend for core services. This includes the OpenShift Container Registry and Quay, Prometheus for monitoring storage, and Elasticsearch for logging storage. Therefore, using RHEL NFS to back PVs used by core services is not recommended. Other NFS implementations on the marketplace might not have these issues. Contact the individual NFS implementation vendor for more information on any testing that was possibly completed against these OpenShift Container Platform core components. 
Procedure To configure your registry to use storage, change the spec.storage.pvc in the configs.imageregistry/cluster resource. Note When you use shared storage, review your security settings to prevent outside access. Verify that you do not have a registry pod: USD oc get pod -n openshift-image-registry -l docker-registry=default Example output No resourses found in openshift-image-registry namespace Note If you do have a registry pod in your output, you do not need to continue with this procedure. Check the registry configuration: USD oc edit configs.imageregistry.operator.openshift.io Example output storage: pvc: claim: 1 1 Leave the claim field blank to allow the automatic creation of an image-registry-storage persistent volume claim (PVC). The PVC is generated based on the default storage class. However, be aware that the default storage class might provide ReadWriteOnce (RWO) volumes, such as a RADOS Block Device (RBD), which can cause issues when you replicate to more than one replica. Check the clusteroperator status: USD oc get clusteroperator image-registry Example output NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE MESSAGE image-registry 4.7 True False False 6h50m 3.7.3.2. Configuring storage for the image registry in non-production clusters You must configure storage for the Image Registry Operator. For non-production clusters, you can set the image registry to an empty directory. If you do so, all images are lost if you restart the registry. Procedure To set the image registry storage to an empty directory: USD oc patch configs.imageregistry.operator.openshift.io cluster --type merge --patch '{"spec":{"storage":{"emptyDir":{}}}}' Warning Configure this option for only non-production clusters. If you run this command before the Image Registry Operator initializes its components, the oc patch command fails with the following error: Error from server (NotFound): configs.imageregistry.operator.openshift.io "cluster" not found Wait a few minutes and run the command again. 3.7.3.3. Configuring block registry storage for VMware vSphere To allow the image registry to use block storage types such as vSphere Virtual Machine Disk (VMDK) during upgrades as a cluster administrator, you can use the Recreate rollout strategy. Important Block storage volumes are supported but not recommended for use with image registry on production clusters. An installation where the registry is configured on block storage is not highly available because the registry cannot have more than one replica. Procedure Enter the following command to set the image registry storage as a block storage type, patch the registry so that it uses the Recreate rollout strategy, and runs with only 1 replica: USD oc patch config.imageregistry.operator.openshift.io/cluster --type=merge -p '{"spec":{"rolloutStrategy":"Recreate","replicas":1}}' Provision the PV for the block storage device, and create a PVC for that volume. The requested block volume uses the ReadWriteOnce (RWO) access mode. Create a pvc.yaml file with the following contents to define a VMware vSphere PersistentVolumeClaim object: kind: PersistentVolumeClaim apiVersion: v1 metadata: name: image-registry-storage 1 namespace: openshift-image-registry 2 spec: accessModes: - ReadWriteOnce 3 resources: requests: storage: 100Gi 4 1 A unique name that represents the PersistentVolumeClaim object. 2 The namespace for the PersistentVolumeClaim object, which is openshift-image-registry . 3 The access mode of the persistent volume claim. 
With ReadWriteOnce , the volume can be mounted with read and write permissions by a single node. 4 The size of the persistent volume claim. Enter the following command to create the PersistentVolumeClaim object from the file: USD oc create -f pvc.yaml -n openshift-image-registry Enter the following command to edit the registry configuration so that it references the correct PVC: USD oc edit config.imageregistry.operator.openshift.io -o yaml Example output storage: pvc: claim: 1 1 By creating a custom PVC, you can leave the claim field blank for the default automatic creation of an image-registry-storage PVC. For instructions about configuring registry storage so that it references the correct PVC, see Configuring the registry for vSphere . 3.7.3.4. Configuring the Image Registry Operator to use Ceph RGW storage with Red Hat OpenShift Data Foundation Red Hat OpenShift Data Foundation integrates multiple storage types that you can use with the OpenShift image registry: Ceph, a shared and distributed file system and on-premises object storage NooBaa, providing a Multicloud Object Gateway This document outlines the procedure to configure the image registry to use Ceph RGW storage. Prerequisites You have access to the cluster as a user with the cluster-admin role. You have access to the OpenShift Container Platform web console. You installed the oc CLI. You installed the OpenShift Data Foundation Operator to provide object storage and Ceph RGW object storage. Procedure Create the object bucket claim using the ocs-storagecluster-ceph-rgw storage class. For example: cat <<EOF | oc apply -f - apiVersion: objectbucket.io/v1alpha1 kind: ObjectBucketClaim metadata: name: rgwbucket namespace: openshift-storage 1 spec: storageClassName: ocs-storagecluster-ceph-rgw generateBucketName: rgwbucket EOF 1 Alternatively, you can use the openshift-image-registry namespace. 
Get the bucket name by entering the following command: USD bucket_name=USD(oc get obc -n openshift-storage rgwbucket -o jsonpath='{.spec.bucketName}') Get the AWS credentials by entering the following commands: USD AWS_ACCESS_KEY_ID=USD(oc get secret -n openshift-storage rgwbucket -o jsonpath='{.data.AWS_ACCESS_KEY_ID}' | base64 --decode) USD AWS_SECRET_ACCESS_KEY=USD(oc get secret -n openshift-storage rgwbucket -o jsonpath='{.data.AWS_SECRET_ACCESS_KEY}' | base64 --decode) Create the secret image-registry-private-configuration-user with the AWS credentials for the new bucket under openshift-image-registry project by entering the following command: USD oc create secret generic image-registry-private-configuration-user --from-literal=REGISTRY_STORAGE_S3_ACCESSKEY=USD{AWS_ACCESS_KEY_ID} --from-literal=REGISTRY_STORAGE_S3_SECRETKEY=USD{AWS_SECRET_ACCESS_KEY} --namespace openshift-image-registry Get the route host by entering the following command: USD route_host=USD(oc get route ocs-storagecluster-cephobjectstore -n openshift-storage --template='{{ .spec.host }}') Create a config map that uses an ingress certificate by entering the following commands: USD oc extract secret/USD(oc get ingresscontroller -n openshift-ingress-operator default -o json | jq '.spec.defaultCertificate.name // "router-certs-default"' -r) -n openshift-ingress --confirm USD oc create configmap image-registry-s3-bundle --from-file=ca-bundle.crt=./tls.crt -n openshift-config Configure the image registry to use the Ceph RGW object storage by entering the following command: USD oc patch config.image/cluster -p '{"spec":{"managementState":"Managed","replicas":2,"storage":{"managementState":"Unmanaged","s3":{"bucket":'\"USD{bucket_name}\"',"region":"us-east-1","regionEndpoint":'\"https://USD{route_host}\"',"virtualHostedStyle":false,"encrypt":false,"trustedCA":{"name":"image-registry-s3-bundle"}}}}}' --type=merge 3.7.3.5. Configuring the Image Registry Operator to use Noobaa storage with Red Hat OpenShift Data Foundation Red Hat OpenShift Data Foundation integrates multiple storage types that you can use with the OpenShift image registry: Ceph, a shared and distributed file system and on-premises object storage NooBaa, providing a Multicloud Object Gateway This document outlines the procedure to configure the image registry to use Noobaa storage. Prerequisites You have access to the cluster as a user with the cluster-admin role. You have access to the OpenShift Container Platform web console. You installed the oc CLI. You installed the OpenShift Data Foundation Operator to provide object storage and Noobaa object storage. Procedure Create the object bucket claim using the openshift-storage.noobaa.io storage class. For example: cat <<EOF | oc apply -f - apiVersion: objectbucket.io/v1alpha1 kind: ObjectBucketClaim metadata: name: noobaatest namespace: openshift-storage 1 spec: storageClassName: openshift-storage.noobaa.io generateBucketName: noobaatest EOF 1 Alternatively, you can use the openshift-image-registry namespace. 
Get the bucket name by entering the following command: USD bucket_name=USD(oc get obc -n openshift-storage noobaatest -o jsonpath='{.spec.bucketName}') Get the AWS credentials by entering the following commands: USD AWS_ACCESS_KEY_ID=USD(oc get secret -n openshift-storage noobaatest -o yaml | grep -w "AWS_ACCESS_KEY_ID:" | head -n1 | awk '{print USD2}' | base64 --decode) USD AWS_SECRET_ACCESS_KEY=USD(oc get secret -n openshift-storage noobaatest -o yaml | grep -w "AWS_SECRET_ACCESS_KEY:" | head -n1 | awk '{print USD2}' | base64 --decode) Create the secret image-registry-private-configuration-user with the AWS credentials for the new bucket under openshift-image-registry project by entering the following command: USD oc create secret generic image-registry-private-configuration-user --from-literal=REGISTRY_STORAGE_S3_ACCESSKEY=USD{AWS_ACCESS_KEY_ID} --from-literal=REGISTRY_STORAGE_S3_SECRETKEY=USD{AWS_SECRET_ACCESS_KEY} --namespace openshift-image-registry Get the route host by entering the following command: USD route_host=USD(oc get route s3 -n openshift-storage -o=jsonpath='{.spec.host}') Create a config map that uses an ingress certificate by entering the following commands: USD oc extract secret/USD(oc get ingresscontroller -n openshift-ingress-operator default -o json | jq '.spec.defaultCertificate.name // "router-certs-default"' -r) -n openshift-ingress --confirm USD oc create configmap image-registry-s3-bundle --from-file=ca-bundle.crt=./tls.crt -n openshift-config Configure the image registry to use the Nooba object storage by entering the following command: USD oc patch config.image/cluster -p '{"spec":{"managementState":"Managed","replicas":2,"storage":{"managementState":"Unmanaged","s3":{"bucket":'\"USD{bucket_name}\"',"region":"us-east-1","regionEndpoint":'\"https://USD{route_host}\"',"virtualHostedStyle":false,"encrypt":false,"trustedCA":{"name":"image-registry-s3-bundle"}}}}}' --type=merge 3.7.4. Configuring the Image Registry Operator to use CephFS storage with Red Hat OpenShift Data Foundation Red Hat OpenShift Data Foundation integrates multiple storage types that you can use with the OpenShift image registry: Ceph, a shared and distributed file system and on-premises object storage NooBaa, providing a Multicloud Object Gateway This document outlines the procedure to configure the image registry to use CephFS storage. Note CephFS uses persistent volume claim (PVC) storage. It is not recommended to use PVCs for image registry storage if there are other options are available, such as Ceph RGW or Noobaa. Prerequisites You have access to the cluster as a user with the cluster-admin role. You have access to the OpenShift Container Platform web console. You installed the oc CLI. You installed the OpenShift Data Foundation Operator to provide object storage and CephFS file storage. Procedure Create a PVC to use the cephfs storage class. For example: cat <<EOF | oc apply -f - apiVersion: v1 kind: PersistentVolumeClaim metadata: name: registry-storage-pvc namespace: openshift-image-registry spec: accessModes: - ReadWriteMany resources: requests: storage: 100Gi storageClassName: ocs-storagecluster-cephfs EOF Configure the image registry to use the CephFS file system storage by entering the following command: USD oc patch config.image/cluster -p '{"spec":{"managementState":"Managed","replicas":2,"storage":{"managementState":"Unmanaged","pvc":{"claim":"registry-storage-pvc"}}}}' --type=merge 3.7.5. 
Additional resources Recommended configurable storage technology Configuring Image Registry to use OpenShift Data Foundation 3.8. Configuring the registry for Red Hat OpenShift Data Foundation To configure the OpenShift image registry on bare metal and vSphere to use Red Hat OpenShift Data Foundation storage, you must install OpenShift Data Foundation and then configure image registry using Ceph or Noobaa. 3.8.1. Configuring the Image Registry Operator to use Ceph RGW storage with Red Hat OpenShift Data Foundation Red Hat OpenShift Data Foundation integrates multiple storage types that you can use with the OpenShift image registry: Ceph, a shared and distributed file system and on-premises object storage NooBaa, providing a Multicloud Object Gateway This document outlines the procedure to configure the image registry to use Ceph RGW storage. Prerequisites You have access to the cluster as a user with the cluster-admin role. You have access to the OpenShift Container Platform web console. You installed the oc CLI. You installed the OpenShift Data Foundation Operator to provide object storage and Ceph RGW object storage. Procedure Create the object bucket claim using the ocs-storagecluster-ceph-rgw storage class. For example: cat <<EOF | oc apply -f - apiVersion: objectbucket.io/v1alpha1 kind: ObjectBucketClaim metadata: name: rgwbucket namespace: openshift-storage 1 spec: storageClassName: ocs-storagecluster-ceph-rgw generateBucketName: rgwbucket EOF 1 Alternatively, you can use the openshift-image-registry namespace. Get the bucket name by entering the following command: USD bucket_name=USD(oc get obc -n openshift-storage rgwbucket -o jsonpath='{.spec.bucketName}') Get the AWS credentials by entering the following commands: USD AWS_ACCESS_KEY_ID=USD(oc get secret -n openshift-storage rgwbucket -o jsonpath='{.data.AWS_ACCESS_KEY_ID}' | base64 --decode) USD AWS_SECRET_ACCESS_KEY=USD(oc get secret -n openshift-storage rgwbucket -o jsonpath='{.data.AWS_SECRET_ACCESS_KEY}' | base64 --decode) Create the secret image-registry-private-configuration-user with the AWS credentials for the new bucket under openshift-image-registry project by entering the following command: USD oc create secret generic image-registry-private-configuration-user --from-literal=REGISTRY_STORAGE_S3_ACCESSKEY=USD{AWS_ACCESS_KEY_ID} --from-literal=REGISTRY_STORAGE_S3_SECRETKEY=USD{AWS_SECRET_ACCESS_KEY} --namespace openshift-image-registry Get the route host by entering the following command: USD route_host=USD(oc get route ocs-storagecluster-cephobjectstore -n openshift-storage --template='{{ .spec.host }}') Create a config map that uses an ingress certificate by entering the following commands: USD oc extract secret/USD(oc get ingresscontroller -n openshift-ingress-operator default -o json | jq '.spec.defaultCertificate.name // "router-certs-default"' -r) -n openshift-ingress --confirm USD oc create configmap image-registry-s3-bundle --from-file=ca-bundle.crt=./tls.crt -n openshift-config Configure the image registry to use the Ceph RGW object storage by entering the following command: USD oc patch config.image/cluster -p '{"spec":{"managementState":"Managed","replicas":2,"storage":{"managementState":"Unmanaged","s3":{"bucket":'\"USD{bucket_name}\"',"region":"us-east-1","regionEndpoint":'\"https://USD{route_host}\"',"virtualHostedStyle":false,"encrypt":false,"trustedCA":{"name":"image-registry-s3-bundle"}}}}}' --type=merge 3.8.2. 
Configuring the Image Registry Operator to use Noobaa storage with Red Hat OpenShift Data Foundation Red Hat OpenShift Data Foundation integrates multiple storage types that you can use with the OpenShift image registry: Ceph, a shared and distributed file system and on-premises object storage NooBaa, providing a Multicloud Object Gateway This document outlines the procedure to configure the image registry to use Noobaa storage. Prerequisites You have access to the cluster as a user with the cluster-admin role. You have access to the OpenShift Container Platform web console. You installed the oc CLI. You installed the OpenShift Data Foundation Operator to provide object storage and Noobaa object storage. Procedure Create the object bucket claim using the openshift-storage.noobaa.io storage class. For example: cat <<EOF | oc apply -f - apiVersion: objectbucket.io/v1alpha1 kind: ObjectBucketClaim metadata: name: noobaatest namespace: openshift-storage 1 spec: storageClassName: openshift-storage.noobaa.io generateBucketName: noobaatest EOF 1 Alternatively, you can use the openshift-image-registry namespace. Get the bucket name by entering the following command: USD bucket_name=USD(oc get obc -n openshift-storage noobaatest -o jsonpath='{.spec.bucketName}') Get the AWS credentials by entering the following commands: USD AWS_ACCESS_KEY_ID=USD(oc get secret -n openshift-storage noobaatest -o yaml | grep -w "AWS_ACCESS_KEY_ID:" | head -n1 | awk '{print USD2}' | base64 --decode) USD AWS_SECRET_ACCESS_KEY=USD(oc get secret -n openshift-storage noobaatest -o yaml | grep -w "AWS_SECRET_ACCESS_KEY:" | head -n1 | awk '{print USD2}' | base64 --decode) Create the secret image-registry-private-configuration-user with the AWS credentials for the new bucket under openshift-image-registry project by entering the following command: USD oc create secret generic image-registry-private-configuration-user --from-literal=REGISTRY_STORAGE_S3_ACCESSKEY=USD{AWS_ACCESS_KEY_ID} --from-literal=REGISTRY_STORAGE_S3_SECRETKEY=USD{AWS_SECRET_ACCESS_KEY} --namespace openshift-image-registry Get the route host by entering the following command: USD route_host=USD(oc get route s3 -n openshift-storage -o=jsonpath='{.spec.host}') Create a config map that uses an ingress certificate by entering the following commands: USD oc extract secret/USD(oc get ingresscontroller -n openshift-ingress-operator default -o json | jq '.spec.defaultCertificate.name // "router-certs-default"' -r) -n openshift-ingress --confirm USD oc create configmap image-registry-s3-bundle --from-file=ca-bundle.crt=./tls.crt -n openshift-config Configure the image registry to use the Nooba object storage by entering the following command: USD oc patch config.image/cluster -p '{"spec":{"managementState":"Managed","replicas":2,"storage":{"managementState":"Unmanaged","s3":{"bucket":'\"USD{bucket_name}\"',"region":"us-east-1","regionEndpoint":'\"https://USD{route_host}\"',"virtualHostedStyle":false,"encrypt":false,"trustedCA":{"name":"image-registry-s3-bundle"}}}}}' --type=merge 3.8.3. Configuring the Image Registry Operator to use CephFS storage with Red Hat OpenShift Data Foundation Red Hat OpenShift Data Foundation integrates multiple storage types that you can use with the OpenShift image registry: Ceph, a shared and distributed file system and on-premises object storage NooBaa, providing a Multicloud Object Gateway This document outlines the procedure to configure the image registry to use CephFS storage. Note CephFS uses persistent volume claim (PVC) storage. 
It is not recommended to use PVCs for image registry storage if there are other options are available, such as Ceph RGW or Noobaa. Prerequisites You have access to the cluster as a user with the cluster-admin role. You have access to the OpenShift Container Platform web console. You installed the oc CLI. You installed the OpenShift Data Foundation Operator to provide object storage and CephFS file storage. Procedure Create a PVC to use the cephfs storage class. For example: cat <<EOF | oc apply -f - apiVersion: v1 kind: PersistentVolumeClaim metadata: name: registry-storage-pvc namespace: openshift-image-registry spec: accessModes: - ReadWriteMany resources: requests: storage: 100Gi storageClassName: ocs-storagecluster-cephfs EOF Configure the image registry to use the CephFS file system storage by entering the following command: USD oc patch config.image/cluster -p '{"spec":{"managementState":"Managed","replicas":2,"storage":{"managementState":"Unmanaged","pvc":{"claim":"registry-storage-pvc"}}}}' --type=merge 3.8.4. Additional resources Configuring Image Registry to use OpenShift Data Foundation Performance tuning guide for Multicloud Object Gateway (NooBaa) 3.9. Configuring the registry for Nutanix By following the steps outlined in this documentation, users can optimize container image distribution, security, and access controls, enabling a robust foundation for Nutanix applications on OpenShift Container Platform 3.9.1. Image registry removed during installation On platforms that do not provide shareable object storage, the OpenShift Image Registry Operator bootstraps itself as Removed . This allows openshift-installer to complete installations on these platform types. After installation, you must edit the Image Registry Operator configuration to switch the managementState from Removed to Managed . When this has completed, you must configure storage. 3.9.2. Changing the image registry's management state To start the image registry, you must change the Image Registry Operator configuration's managementState from Removed to Managed . Procedure Change managementState Image Registry Operator configuration from Removed to Managed . For example: USD oc patch configs.imageregistry.operator.openshift.io cluster --type merge --patch '{"spec":{"managementState":"Managed"}}' 3.9.3. Image registry storage configuration The Image Registry Operator is not initially available for platforms that do not provide default storage. After installation, you must configure your registry to use storage so that the Registry Operator is made available. Instructions are shown for configuring a persistent volume, which is required for production clusters. Where applicable, instructions are shown for configuring an empty directory as the storage location, which is available for only non-production clusters. Additional instructions are provided for allowing the image registry to use block storage types by using the Recreate rollout strategy during upgrades. 3.9.3.1. Configuring registry storage for Nutanix As a cluster administrator, following installation you must configure your registry to use storage. Prerequisites You have access to the cluster as a user with the cluster-admin role. You have a cluster on Nutanix. You have provisioned persistent storage for your cluster, such as Red Hat OpenShift Data Foundation. Important OpenShift Container Platform supports ReadWriteOnce access for image registry storage when you have only one replica. 
ReadWriteOnce access also requires that the registry uses the Recreate rollout strategy. To deploy an image registry that supports high availability with two or more replicas, ReadWriteMany access is required. You must have 100 Gi capacity. Procedure To configure your registry to use storage, change the spec.storage.pvc in the configs.imageregistry/cluster resource. Note When you use shared storage, review your security settings to prevent outside access. Verify that you do not have a registry pod: USD oc get pod -n openshift-image-registry -l docker-registry=default Example output No resourses found in openshift-image-registry namespace Note If you do have a registry pod in your output, you do not need to continue with this procedure. Check the registry configuration: USD oc edit configs.imageregistry.operator.openshift.io Example output storage: pvc: claim: 1 1 Leave the claim field blank to allow the automatic creation of an image-registry-storage persistent volume claim (PVC). The PVC is generated based on the default storage class. However, be aware that the default storage class might provide ReadWriteOnce (RWO) volumes, such as a RADOS Block Device (RBD), which can cause issues when you replicate to more than one replica. Check the clusteroperator status: USD oc get clusteroperator image-registry Example output NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE MESSAGE image-registry 4.13 True False False 6h50m 3.9.3.2. Configuring storage for the image registry in non-production clusters You must configure storage for the Image Registry Operator. For non-production clusters, you can set the image registry to an empty directory. If you do so, all images are lost if you restart the registry. Procedure To set the image registry storage to an empty directory: USD oc patch configs.imageregistry.operator.openshift.io cluster --type merge --patch '{"spec":{"storage":{"emptyDir":{}}}}' Warning Configure this option for only non-production clusters. If you run this command before the Image Registry Operator initializes its components, the oc patch command fails with the following error: Error from server (NotFound): configs.imageregistry.operator.openshift.io "cluster" not found Wait a few minutes and run the command again. 3.9.3.3. Configuring block registry storage for Nutanix volumes To allow the image registry to use block storage types such as Nutanix volumes during upgrades as a cluster administrator, you can use the Recreate rollout strategy. Important Block storage volumes, or block persistent volumes, are supported but not recommended for use with the image registry on production clusters. An installation where the registry is configured on block storage is not highly available because the registry cannot have more than one replica. If you choose to use a block storage volume with the image registry, you must use a filesystem persistent volume claim (PVC). Procedure Enter the following command to set the image registry storage as a block storage type, patch the registry so that it uses the Recreate rollout strategy, and runs with only one ( 1 ) replica: USD oc patch config.imageregistry.operator.openshift.io/cluster --type=merge -p '{"spec":{"rolloutStrategy":"Recreate","replicas":1}}' Provision the PV for the block storage device, and create a PVC for that volume. The requested block volume uses the ReadWriteOnce (RWO) access mode. 
Create a pvc.yaml file with the following contents to define a Nutanix PersistentVolumeClaim object: kind: PersistentVolumeClaim apiVersion: v1 metadata: name: image-registry-storage 1 namespace: openshift-image-registry 2 spec: accessModes: - ReadWriteOnce 3 resources: requests: storage: 100Gi 4 1 A unique name that represents the PersistentVolumeClaim object. 2 The namespace for the PersistentVolumeClaim object, which is openshift-image-registry . 3 The access mode of the persistent volume claim. With ReadWriteOnce , the volume can be mounted with read and write permissions by a single node. 4 The size of the persistent volume claim. Enter the following command to create the PersistentVolumeClaim object from the file: USD oc create -f pvc.yaml -n openshift-image-registry Enter the following command to edit the registry configuration so that it references the correct PVC: USD oc edit config.imageregistry.operator.openshift.io -o yaml Example output storage: pvc: claim: 1 1 By creating a custom PVC, you can leave the claim field blank for the default automatic creation of an image-registry-storage PVC. 3.9.3.4. Configuring the Image Registry Operator to use Ceph RGW storage with Red Hat OpenShift Data Foundation Red Hat OpenShift Data Foundation integrates multiple storage types that you can use with the OpenShift image registry: Ceph, a shared and distributed file system and on-premises object storage NooBaa, providing a Multicloud Object Gateway This document outlines the procedure to configure the image registry to use Ceph RGW storage. Prerequisites You have access to the cluster as a user with the cluster-admin role. You have access to the OpenShift Container Platform web console. You installed the oc CLI. You installed the OpenShift Data Foundation Operator to provide object storage and Ceph RGW object storage. Procedure Create the object bucket claim using the ocs-storagecluster-ceph-rgw storage class. For example: cat <<EOF | oc apply -f - apiVersion: objectbucket.io/v1alpha1 kind: ObjectBucketClaim metadata: name: rgwbucket namespace: openshift-storage 1 spec: storageClassName: ocs-storagecluster-ceph-rgw generateBucketName: rgwbucket EOF 1 Alternatively, you can use the openshift-image-registry namespace. 
Get the bucket name by entering the following command: USD bucket_name=USD(oc get obc -n openshift-storage rgwbucket -o jsonpath='{.spec.bucketName}') Get the AWS credentials by entering the following commands: USD AWS_ACCESS_KEY_ID=USD(oc get secret -n openshift-storage rgwbucket -o jsonpath='{.data.AWS_ACCESS_KEY_ID}' | base64 --decode) USD AWS_SECRET_ACCESS_KEY=USD(oc get secret -n openshift-storage rgwbucket -o jsonpath='{.data.AWS_SECRET_ACCESS_KEY}' | base64 --decode) Create the secret image-registry-private-configuration-user with the AWS credentials for the new bucket under openshift-image-registry project by entering the following command: USD oc create secret generic image-registry-private-configuration-user --from-literal=REGISTRY_STORAGE_S3_ACCESSKEY=USD{AWS_ACCESS_KEY_ID} --from-literal=REGISTRY_STORAGE_S3_SECRETKEY=USD{AWS_SECRET_ACCESS_KEY} --namespace openshift-image-registry Get the route host by entering the following command: USD route_host=USD(oc get route ocs-storagecluster-cephobjectstore -n openshift-storage --template='{{ .spec.host }}') Create a config map that uses an ingress certificate by entering the following commands: USD oc extract secret/USD(oc get ingresscontroller -n openshift-ingress-operator default -o json | jq '.spec.defaultCertificate.name // "router-certs-default"' -r) -n openshift-ingress --confirm USD oc create configmap image-registry-s3-bundle --from-file=ca-bundle.crt=./tls.crt -n openshift-config Configure the image registry to use the Ceph RGW object storage by entering the following command: USD oc patch config.image/cluster -p '{"spec":{"managementState":"Managed","replicas":2,"storage":{"managementState":"Unmanaged","s3":{"bucket":'\"USD{bucket_name}\"',"region":"us-east-1","regionEndpoint":'\"https://USD{route_host}\"',"virtualHostedStyle":false,"encrypt":false,"trustedCA":{"name":"image-registry-s3-bundle"}}}}}' --type=merge 3.9.3.5. Configuring the Image Registry Operator to use Noobaa storage with Red Hat OpenShift Data Foundation Red Hat OpenShift Data Foundation integrates multiple storage types that you can use with the OpenShift image registry: Ceph, a shared and distributed file system and on-premises object storage NooBaa, providing a Multicloud Object Gateway This document outlines the procedure to configure the image registry to use Noobaa storage. Prerequisites You have access to the cluster as a user with the cluster-admin role. You have access to the OpenShift Container Platform web console. You installed the oc CLI. You installed the OpenShift Data Foundation Operator to provide object storage and Noobaa object storage. Procedure Create the object bucket claim using the openshift-storage.noobaa.io storage class. For example: cat <<EOF | oc apply -f - apiVersion: objectbucket.io/v1alpha1 kind: ObjectBucketClaim metadata: name: noobaatest namespace: openshift-storage 1 spec: storageClassName: openshift-storage.noobaa.io generateBucketName: noobaatest EOF 1 Alternatively, you can use the openshift-image-registry namespace. 
Get the bucket name by entering the following command: USD bucket_name=USD(oc get obc -n openshift-storage noobaatest -o jsonpath='{.spec.bucketName}') Get the AWS credentials by entering the following commands: USD AWS_ACCESS_KEY_ID=USD(oc get secret -n openshift-storage noobaatest -o yaml | grep -w "AWS_ACCESS_KEY_ID:" | head -n1 | awk '{print USD2}' | base64 --decode) USD AWS_SECRET_ACCESS_KEY=USD(oc get secret -n openshift-storage noobaatest -o yaml | grep -w "AWS_SECRET_ACCESS_KEY:" | head -n1 | awk '{print USD2}' | base64 --decode) Create the secret image-registry-private-configuration-user with the AWS credentials for the new bucket under openshift-image-registry project by entering the following command: USD oc create secret generic image-registry-private-configuration-user --from-literal=REGISTRY_STORAGE_S3_ACCESSKEY=USD{AWS_ACCESS_KEY_ID} --from-literal=REGISTRY_STORAGE_S3_SECRETKEY=USD{AWS_SECRET_ACCESS_KEY} --namespace openshift-image-registry Get the route host by entering the following command: USD route_host=USD(oc get route s3 -n openshift-storage -o=jsonpath='{.spec.host}') Create a config map that uses an ingress certificate by entering the following commands: USD oc extract secret/USD(oc get ingresscontroller -n openshift-ingress-operator default -o json | jq '.spec.defaultCertificate.name // "router-certs-default"' -r) -n openshift-ingress --confirm USD oc create configmap image-registry-s3-bundle --from-file=ca-bundle.crt=./tls.crt -n openshift-config Configure the image registry to use the Nooba object storage by entering the following command: USD oc patch config.image/cluster -p '{"spec":{"managementState":"Managed","replicas":2,"storage":{"managementState":"Unmanaged","s3":{"bucket":'\"USD{bucket_name}\"',"region":"us-east-1","regionEndpoint":'\"https://USD{route_host}\"',"virtualHostedStyle":false,"encrypt":false,"trustedCA":{"name":"image-registry-s3-bundle"}}}}}' --type=merge 3.9.4. Configuring the Image Registry Operator to use CephFS storage with Red Hat OpenShift Data Foundation Red Hat OpenShift Data Foundation integrates multiple storage types that you can use with the OpenShift image registry: Ceph, a shared and distributed file system and on-premises object storage NooBaa, providing a Multicloud Object Gateway This document outlines the procedure to configure the image registry to use CephFS storage. Note CephFS uses persistent volume claim (PVC) storage. It is not recommended to use PVCs for image registry storage if there are other options are available, such as Ceph RGW or Noobaa. Prerequisites You have access to the cluster as a user with the cluster-admin role. You have access to the OpenShift Container Platform web console. You installed the oc CLI. You installed the OpenShift Data Foundation Operator to provide object storage and CephFS file storage. Procedure Create a PVC to use the cephfs storage class. For example: cat <<EOF | oc apply -f - apiVersion: v1 kind: PersistentVolumeClaim metadata: name: registry-storage-pvc namespace: openshift-image-registry spec: accessModes: - ReadWriteMany resources: requests: storage: 100Gi storageClassName: ocs-storagecluster-cephfs EOF Configure the image registry to use the CephFS file system storage by entering the following command: USD oc patch config.image/cluster -p '{"spec":{"managementState":"Managed","replicas":2,"storage":{"managementState":"Unmanaged","pvc":{"claim":"registry-storage-pvc"}}}}' --type=merge 3.9.5. 
Additional resources Recommended configurable storage technology Configuring Image Registry to use OpenShift Data Foundation
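After any of the storage configurations above, it can help to confirm that the Image Registry Operator picked up the change. The commands below are a minimal verification sketch, not part of the procedures above; they only read cluster state and assume the resource names used in the earlier examples (for example, the registry-storage-pvc claim or the image-registry-s3-bundle config map).

# Show the storage stanza the Image Registry Operator is currently using
# (expect the s3 or pvc settings that you patched in).
oc get configs.imageregistry.operator.openshift.io/cluster -o jsonpath='{.spec.storage}{"\n"}'

# Confirm that the registry pods are running.
oc get pods -n openshift-image-registry

# Confirm that the image-registry cluster Operator reports Available=True and Degraded=False.
oc get clusteroperator image-registry

If the reported storage stanza does not match what you configured, re-run the corresponding oc patch command from the relevant procedure and review the Operator pod logs in the openshift-image-registry namespace.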
[ "oc create secret generic image-registry-private-configuration-user --from-literal=REGISTRY_STORAGE_S3_ACCESSKEY=myaccesskey --from-literal=REGISTRY_STORAGE_S3_SECRETKEY=mysecretkey --namespace openshift-image-registry", "oc edit configs.imageregistry.operator.openshift.io/cluster", "storage: s3: bucket: <bucket-name> region: <region-name>", "regionEndpoint: http://rook-ceph-rgw-ocs-storagecluster-cephobjectstore.openshift-storage.svc.cluster.local", "oc create secret generic image-registry-private-configuration-user --from-file=REGISTRY_STORAGE_GCS_KEYFILE=<path_to_keyfile> --namespace openshift-image-registry", "oc edit configs.imageregistry.operator.openshift.io/cluster", "storage: gcs: bucket: <bucket-name> projectID: <project-id> region: <region-name>", "oc patch configs.imageregistry.operator.openshift.io cluster --type merge --patch '{\"spec\":{\"disableRedirect\":true}}'", "oc create secret generic image-registry-private-configuration-user --from-literal=REGISTRY_STORAGE_SWIFT_USERNAME=<username> --from-literal=REGISTRY_STORAGE_SWIFT_PASSWORD=<password> -n openshift-image-registry", "oc edit configs.imageregistry.operator.openshift.io/cluster", "storage: swift: container: <container-id>", "oc create secret generic image-registry-private-configuration-user --from-literal=REGISTRY_STORAGE_AZURE_ACCOUNTKEY=<accountkey> --namespace openshift-image-registry", "oc edit configs.imageregistry.operator.openshift.io/cluster", "storage: azure: accountName: <storage-account-name> container: <container-name>", "oc edit configs.imageregistry.operator.openshift.io/cluster", "storage: azure: accountName: <storage-account-name> container: <container-name> cloudName: AzureUSGovernmentCloud 1", "apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: name: custom-csi-storageclass provisioner: cinder.csi.openstack.org volumeBindingMode: WaitForFirstConsumer allowVolumeExpansion: true parameters: availability: <availability_zone_name>", "oc apply -f <storage_class_file_name>", "storageclass.storage.k8s.io/custom-csi-storageclass created", "apiVersion: v1 kind: PersistentVolumeClaim metadata: name: csi-pvc-imageregistry namespace: openshift-image-registry 1 annotations: imageregistry.openshift.io: \"true\" spec: accessModes: - ReadWriteOnce volumeMode: Filesystem resources: requests: storage: 100Gi 2 storageClassName: <your_custom_storage_class> 3", "oc apply -f <pvc_file_name>", "persistentvolumeclaim/csi-pvc-imageregistry created", "oc patch configs.imageregistry.operator.openshift.io/cluster --type 'json' -p='[{\"op\": \"replace\", \"path\": \"/spec/storage/pvc/claim\", \"value\": \"csi-pvc-imageregistry\"}]'", "config.imageregistry.operator.openshift.io/cluster patched", "oc get configs.imageregistry.operator.openshift.io/cluster -o yaml", "status: managementState: Managed pvc: claim: csi-pvc-imageregistry", "oc get pvc -n openshift-image-registry csi-pvc-imageregistry", "NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE csi-pvc-imageregistry Bound pvc-72a8f9c9-f462-11e8-b6b6-fa163e18b7b5 100Gi RWO custom-csi-storageclass 11m", "oc patch configs.imageregistry.operator.openshift.io cluster --type merge --patch '{\"spec\":{\"managementState\":\"Managed\"}}'", "oc get pod -n openshift-image-registry -l docker-registry=default", "No resources found in openshift-image-registry namespace", "oc edit configs.imageregistry.operator.openshift.io", "storage: pvc: claim:", "oc get clusteroperator image-registry", "NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE MESSAGE image-registry 4.18 True 
False False 6h50m", "oc edit configs.imageregistry/cluster", "managementState: Removed", "managementState: Managed", "oc patch configs.imageregistry.operator.openshift.io cluster --type merge --patch '{\"spec\":{\"storage\":{\"emptyDir\":{}}}}'", "Error from server (NotFound): configs.imageregistry.operator.openshift.io \"cluster\" not found", "oc patch config.imageregistry.operator.openshift.io/cluster --type=merge -p '{\"spec\":{\"rolloutStrategy\":\"Recreate\",\"replicas\":1}}'", "kind: PersistentVolumeClaim apiVersion: v1 metadata: name: image-registry-storage 1 namespace: openshift-image-registry 2 spec: accessModes: - ReadWriteOnce 3 resources: requests: storage: 100Gi 4", "oc create -f pvc.yaml -n openshift-image-registry", "oc edit config.imageregistry.operator.openshift.io -o yaml", "storage: pvc: claim: 1", "cat <<EOF | oc apply -f - apiVersion: objectbucket.io/v1alpha1 kind: ObjectBucketClaim metadata: name: rgwbucket namespace: openshift-storage 1 spec: storageClassName: ocs-storagecluster-ceph-rgw generateBucketName: rgwbucket EOF", "bucket_name=USD(oc get obc -n openshift-storage rgwbucket -o jsonpath='{.spec.bucketName}')", "AWS_ACCESS_KEY_ID=USD(oc get secret -n openshift-storage rgwbucket -o jsonpath='{.data.AWS_ACCESS_KEY_ID}' | base64 --decode)", "AWS_SECRET_ACCESS_KEY=USD(oc get secret -n openshift-storage rgwbucket -o jsonpath='{.data.AWS_SECRET_ACCESS_KEY}' | base64 --decode)", "oc create secret generic image-registry-private-configuration-user --from-literal=REGISTRY_STORAGE_S3_ACCESSKEY=USD{AWS_ACCESS_KEY_ID} --from-literal=REGISTRY_STORAGE_S3_SECRETKEY=USD{AWS_SECRET_ACCESS_KEY} --namespace openshift-image-registry", "route_host=USD(oc get route ocs-storagecluster-cephobjectstore -n openshift-storage --template='{{ .spec.host }}')", "oc extract secret/USD(oc get ingresscontroller -n openshift-ingress-operator default -o json | jq '.spec.defaultCertificate.name // \"router-certs-default\"' -r) -n openshift-ingress --confirm", "oc create configmap image-registry-s3-bundle --from-file=ca-bundle.crt=./tls.crt -n openshift-config", "oc patch config.image/cluster -p '{\"spec\":{\"managementState\":\"Managed\",\"replicas\":2,\"storage\":{\"managementState\":\"Unmanaged\",\"s3\":{\"bucket\":'\\\"USD{bucket_name}\\\"',\"region\":\"us-east-1\",\"regionEndpoint\":'\\\"https://USD{route_host}\\\"',\"virtualHostedStyle\":false,\"encrypt\":false,\"trustedCA\":{\"name\":\"image-registry-s3-bundle\"}}}}}' --type=merge", "cat <<EOF | oc apply -f - apiVersion: objectbucket.io/v1alpha1 kind: ObjectBucketClaim metadata: name: noobaatest namespace: openshift-storage 1 spec: storageClassName: openshift-storage.noobaa.io generateBucketName: noobaatest EOF", "bucket_name=USD(oc get obc -n openshift-storage noobaatest -o jsonpath='{.spec.bucketName}')", "AWS_ACCESS_KEY_ID=USD(oc get secret -n openshift-storage noobaatest -o yaml | grep -w \"AWS_ACCESS_KEY_ID:\" | head -n1 | awk '{print USD2}' | base64 --decode)", "AWS_SECRET_ACCESS_KEY=USD(oc get secret -n openshift-storage noobaatest -o yaml | grep -w \"AWS_SECRET_ACCESS_KEY:\" | head -n1 | awk '{print USD2}' | base64 --decode)", "oc create secret generic image-registry-private-configuration-user --from-literal=REGISTRY_STORAGE_S3_ACCESSKEY=USD{AWS_ACCESS_KEY_ID} --from-literal=REGISTRY_STORAGE_S3_SECRETKEY=USD{AWS_SECRET_ACCESS_KEY} --namespace openshift-image-registry", "route_host=USD(oc get route s3 -n openshift-storage -o=jsonpath='{.spec.host}')", "oc extract secret/USD(oc get ingresscontroller -n openshift-ingress-operator default 
-o json | jq '.spec.defaultCertificate.name // \"router-certs-default\"' -r) -n openshift-ingress --confirm", "oc create configmap image-registry-s3-bundle --from-file=ca-bundle.crt=./tls.crt -n openshift-config", "oc patch config.image/cluster -p '{\"spec\":{\"managementState\":\"Managed\",\"replicas\":2,\"storage\":{\"managementState\":\"Unmanaged\",\"s3\":{\"bucket\":'\\\"USD{bucket_name}\\\"',\"region\":\"us-east-1\",\"regionEndpoint\":'\\\"https://USD{route_host}\\\"',\"virtualHostedStyle\":false,\"encrypt\":false,\"trustedCA\":{\"name\":\"image-registry-s3-bundle\"}}}}}' --type=merge", "cat <<EOF | oc apply -f - apiVersion: v1 kind: PersistentVolumeClaim metadata: name: registry-storage-pvc namespace: openshift-image-registry spec: accessModes: - ReadWriteMany resources: requests: storage: 100Gi storageClassName: ocs-storagecluster-cephfs EOF", "oc patch config.image/cluster -p '{\"spec\":{\"managementState\":\"Managed\",\"replicas\":2,\"storage\":{\"managementState\":\"Unmanaged\",\"pvc\":{\"claim\":\"registry-storage-pvc\"}}}}' --type=merge", "oc patch configs.imageregistry.operator.openshift.io cluster --type merge --patch '{\"spec\":{\"managementState\":\"Managed\"}}'", "oc get pod -n openshift-image-registry -l docker-registry=default", "No resourses found in openshift-image-registry namespace", "oc edit configs.imageregistry.operator.openshift.io", "storage: pvc: claim: 1", "oc get clusteroperator image-registry", "NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE MESSAGE image-registry 4.7 True False False 6h50m", "oc patch configs.imageregistry.operator.openshift.io cluster --type merge --patch '{\"spec\":{\"storage\":{\"emptyDir\":{}}}}'", "Error from server (NotFound): configs.imageregistry.operator.openshift.io \"cluster\" not found", "oc patch config.imageregistry.operator.openshift.io/cluster --type=merge -p '{\"spec\":{\"rolloutStrategy\":\"Recreate\",\"replicas\":1}}'", "kind: PersistentVolumeClaim apiVersion: v1 metadata: name: image-registry-storage 1 namespace: openshift-image-registry 2 spec: accessModes: - ReadWriteOnce 3 resources: requests: storage: 100Gi 4", "oc create -f pvc.yaml -n openshift-image-registry", "oc edit config.imageregistry.operator.openshift.io -o yaml", "storage: pvc: claim: 1", "cat <<EOF | oc apply -f - apiVersion: objectbucket.io/v1alpha1 kind: ObjectBucketClaim metadata: name: rgwbucket namespace: openshift-storage 1 spec: storageClassName: ocs-storagecluster-ceph-rgw generateBucketName: rgwbucket EOF", "bucket_name=USD(oc get obc -n openshift-storage rgwbucket -o jsonpath='{.spec.bucketName}')", "AWS_ACCESS_KEY_ID=USD(oc get secret -n openshift-storage rgwbucket -o jsonpath='{.data.AWS_ACCESS_KEY_ID}' | base64 --decode)", "AWS_SECRET_ACCESS_KEY=USD(oc get secret -n openshift-storage rgwbucket -o jsonpath='{.data.AWS_SECRET_ACCESS_KEY}' | base64 --decode)", "oc create secret generic image-registry-private-configuration-user --from-literal=REGISTRY_STORAGE_S3_ACCESSKEY=USD{AWS_ACCESS_KEY_ID} --from-literal=REGISTRY_STORAGE_S3_SECRETKEY=USD{AWS_SECRET_ACCESS_KEY} --namespace openshift-image-registry", "route_host=USD(oc get route ocs-storagecluster-cephobjectstore -n openshift-storage --template='{{ .spec.host }}')", "oc extract secret/USD(oc get ingresscontroller -n openshift-ingress-operator default -o json | jq '.spec.defaultCertificate.name // \"router-certs-default\"' -r) -n openshift-ingress --confirm", "oc create configmap image-registry-s3-bundle --from-file=ca-bundle.crt=./tls.crt -n openshift-config", "oc patch config.image/cluster 
-p '{\"spec\":{\"managementState\":\"Managed\",\"replicas\":2,\"storage\":{\"managementState\":\"Unmanaged\",\"s3\":{\"bucket\":'\\\"USD{bucket_name}\\\"',\"region\":\"us-east-1\",\"regionEndpoint\":'\\\"https://USD{route_host}\\\"',\"virtualHostedStyle\":false,\"encrypt\":false,\"trustedCA\":{\"name\":\"image-registry-s3-bundle\"}}}}}' --type=merge", "cat <<EOF | oc apply -f - apiVersion: objectbucket.io/v1alpha1 kind: ObjectBucketClaim metadata: name: noobaatest namespace: openshift-storage 1 spec: storageClassName: openshift-storage.noobaa.io generateBucketName: noobaatest EOF", "bucket_name=USD(oc get obc -n openshift-storage noobaatest -o jsonpath='{.spec.bucketName}')", "AWS_ACCESS_KEY_ID=USD(oc get secret -n openshift-storage noobaatest -o yaml | grep -w \"AWS_ACCESS_KEY_ID:\" | head -n1 | awk '{print USD2}' | base64 --decode)", "AWS_SECRET_ACCESS_KEY=USD(oc get secret -n openshift-storage noobaatest -o yaml | grep -w \"AWS_SECRET_ACCESS_KEY:\" | head -n1 | awk '{print USD2}' | base64 --decode)", "oc create secret generic image-registry-private-configuration-user --from-literal=REGISTRY_STORAGE_S3_ACCESSKEY=USD{AWS_ACCESS_KEY_ID} --from-literal=REGISTRY_STORAGE_S3_SECRETKEY=USD{AWS_SECRET_ACCESS_KEY} --namespace openshift-image-registry", "route_host=USD(oc get route s3 -n openshift-storage -o=jsonpath='{.spec.host}')", "oc extract secret/USD(oc get ingresscontroller -n openshift-ingress-operator default -o json | jq '.spec.defaultCertificate.name // \"router-certs-default\"' -r) -n openshift-ingress --confirm", "oc create configmap image-registry-s3-bundle --from-file=ca-bundle.crt=./tls.crt -n openshift-config", "oc patch config.image/cluster -p '{\"spec\":{\"managementState\":\"Managed\",\"replicas\":2,\"storage\":{\"managementState\":\"Unmanaged\",\"s3\":{\"bucket\":'\\\"USD{bucket_name}\\\"',\"region\":\"us-east-1\",\"regionEndpoint\":'\\\"https://USD{route_host}\\\"',\"virtualHostedStyle\":false,\"encrypt\":false,\"trustedCA\":{\"name\":\"image-registry-s3-bundle\"}}}}}' --type=merge", "cat <<EOF | oc apply -f - apiVersion: v1 kind: PersistentVolumeClaim metadata: name: registry-storage-pvc namespace: openshift-image-registry spec: accessModes: - ReadWriteMany resources: requests: storage: 100Gi storageClassName: ocs-storagecluster-cephfs EOF", "oc patch config.image/cluster -p '{\"spec\":{\"managementState\":\"Managed\",\"replicas\":2,\"storage\":{\"managementState\":\"Unmanaged\",\"pvc\":{\"claim\":\"registry-storage-pvc\"}}}}' --type=merge", "cat <<EOF | oc apply -f - apiVersion: objectbucket.io/v1alpha1 kind: ObjectBucketClaim metadata: name: rgwbucket namespace: openshift-storage 1 spec: storageClassName: ocs-storagecluster-ceph-rgw generateBucketName: rgwbucket EOF", "bucket_name=USD(oc get obc -n openshift-storage rgwbucket -o jsonpath='{.spec.bucketName}')", "AWS_ACCESS_KEY_ID=USD(oc get secret -n openshift-storage rgwbucket -o jsonpath='{.data.AWS_ACCESS_KEY_ID}' | base64 --decode)", "AWS_SECRET_ACCESS_KEY=USD(oc get secret -n openshift-storage rgwbucket -o jsonpath='{.data.AWS_SECRET_ACCESS_KEY}' | base64 --decode)", "oc create secret generic image-registry-private-configuration-user --from-literal=REGISTRY_STORAGE_S3_ACCESSKEY=USD{AWS_ACCESS_KEY_ID} --from-literal=REGISTRY_STORAGE_S3_SECRETKEY=USD{AWS_SECRET_ACCESS_KEY} --namespace openshift-image-registry", "route_host=USD(oc get route ocs-storagecluster-cephobjectstore -n openshift-storage --template='{{ .spec.host }}')", "oc extract secret/USD(oc get ingresscontroller -n openshift-ingress-operator default -o json 
| jq '.spec.defaultCertificate.name // \"router-certs-default\"' -r) -n openshift-ingress --confirm", "oc create configmap image-registry-s3-bundle --from-file=ca-bundle.crt=./tls.crt -n openshift-config", "oc patch config.image/cluster -p '{\"spec\":{\"managementState\":\"Managed\",\"replicas\":2,\"storage\":{\"managementState\":\"Unmanaged\",\"s3\":{\"bucket\":'\\\"USD{bucket_name}\\\"',\"region\":\"us-east-1\",\"regionEndpoint\":'\\\"https://USD{route_host}\\\"',\"virtualHostedStyle\":false,\"encrypt\":false,\"trustedCA\":{\"name\":\"image-registry-s3-bundle\"}}}}}' --type=merge", "cat <<EOF | oc apply -f - apiVersion: objectbucket.io/v1alpha1 kind: ObjectBucketClaim metadata: name: noobaatest namespace: openshift-storage 1 spec: storageClassName: openshift-storage.noobaa.io generateBucketName: noobaatest EOF", "bucket_name=USD(oc get obc -n openshift-storage noobaatest -o jsonpath='{.spec.bucketName}')", "AWS_ACCESS_KEY_ID=USD(oc get secret -n openshift-storage noobaatest -o yaml | grep -w \"AWS_ACCESS_KEY_ID:\" | head -n1 | awk '{print USD2}' | base64 --decode)", "AWS_SECRET_ACCESS_KEY=USD(oc get secret -n openshift-storage noobaatest -o yaml | grep -w \"AWS_SECRET_ACCESS_KEY:\" | head -n1 | awk '{print USD2}' | base64 --decode)", "oc create secret generic image-registry-private-configuration-user --from-literal=REGISTRY_STORAGE_S3_ACCESSKEY=USD{AWS_ACCESS_KEY_ID} --from-literal=REGISTRY_STORAGE_S3_SECRETKEY=USD{AWS_SECRET_ACCESS_KEY} --namespace openshift-image-registry", "route_host=USD(oc get route s3 -n openshift-storage -o=jsonpath='{.spec.host}')", "oc extract secret/USD(oc get ingresscontroller -n openshift-ingress-operator default -o json | jq '.spec.defaultCertificate.name // \"router-certs-default\"' -r) -n openshift-ingress --confirm", "oc create configmap image-registry-s3-bundle --from-file=ca-bundle.crt=./tls.crt -n openshift-config", "oc patch config.image/cluster -p '{\"spec\":{\"managementState\":\"Managed\",\"replicas\":2,\"storage\":{\"managementState\":\"Unmanaged\",\"s3\":{\"bucket\":'\\\"USD{bucket_name}\\\"',\"region\":\"us-east-1\",\"regionEndpoint\":'\\\"https://USD{route_host}\\\"',\"virtualHostedStyle\":false,\"encrypt\":false,\"trustedCA\":{\"name\":\"image-registry-s3-bundle\"}}}}}' --type=merge", "cat <<EOF | oc apply -f - apiVersion: v1 kind: PersistentVolumeClaim metadata: name: registry-storage-pvc namespace: openshift-image-registry spec: accessModes: - ReadWriteMany resources: requests: storage: 100Gi storageClassName: ocs-storagecluster-cephfs EOF", "oc patch config.image/cluster -p '{\"spec\":{\"managementState\":\"Managed\",\"replicas\":2,\"storage\":{\"managementState\":\"Unmanaged\",\"pvc\":{\"claim\":\"registry-storage-pvc\"}}}}' --type=merge", "oc patch configs.imageregistry.operator.openshift.io cluster --type merge --patch '{\"spec\":{\"managementState\":\"Managed\"}}'", "oc get pod -n openshift-image-registry -l docker-registry=default", "No resourses found in openshift-image-registry namespace", "oc edit configs.imageregistry.operator.openshift.io", "storage: pvc: claim: 1", "oc get clusteroperator image-registry", "NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE MESSAGE image-registry 4.13 True False False 6h50m", "oc patch configs.imageregistry.operator.openshift.io cluster --type merge --patch '{\"spec\":{\"storage\":{\"emptyDir\":{}}}}'", "Error from server (NotFound): configs.imageregistry.operator.openshift.io \"cluster\" not found", "oc patch config.imageregistry.operator.openshift.io/cluster --type=merge -p 
'{\"spec\":{\"rolloutStrategy\":\"Recreate\",\"replicas\":1}}'", "kind: PersistentVolumeClaim apiVersion: v1 metadata: name: image-registry-storage 1 namespace: openshift-image-registry 2 spec: accessModes: - ReadWriteOnce 3 resources: requests: storage: 100Gi 4", "oc create -f pvc.yaml -n openshift-image-registry", "oc edit config.imageregistry.operator.openshift.io -o yaml", "storage: pvc: claim: 1", "cat <<EOF | oc apply -f - apiVersion: objectbucket.io/v1alpha1 kind: ObjectBucketClaim metadata: name: rgwbucket namespace: openshift-storage 1 spec: storageClassName: ocs-storagecluster-ceph-rgw generateBucketName: rgwbucket EOF", "bucket_name=USD(oc get obc -n openshift-storage rgwbucket -o jsonpath='{.spec.bucketName}')", "AWS_ACCESS_KEY_ID=USD(oc get secret -n openshift-storage rgwbucket -o jsonpath='{.data.AWS_ACCESS_KEY_ID}' | base64 --decode)", "AWS_SECRET_ACCESS_KEY=USD(oc get secret -n openshift-storage rgwbucket -o jsonpath='{.data.AWS_SECRET_ACCESS_KEY}' | base64 --decode)", "oc create secret generic image-registry-private-configuration-user --from-literal=REGISTRY_STORAGE_S3_ACCESSKEY=USD{AWS_ACCESS_KEY_ID} --from-literal=REGISTRY_STORAGE_S3_SECRETKEY=USD{AWS_SECRET_ACCESS_KEY} --namespace openshift-image-registry", "route_host=USD(oc get route ocs-storagecluster-cephobjectstore -n openshift-storage --template='{{ .spec.host }}')", "oc extract secret/USD(oc get ingresscontroller -n openshift-ingress-operator default -o json | jq '.spec.defaultCertificate.name // \"router-certs-default\"' -r) -n openshift-ingress --confirm", "oc create configmap image-registry-s3-bundle --from-file=ca-bundle.crt=./tls.crt -n openshift-config", "oc patch config.image/cluster -p '{\"spec\":{\"managementState\":\"Managed\",\"replicas\":2,\"storage\":{\"managementState\":\"Unmanaged\",\"s3\":{\"bucket\":'\\\"USD{bucket_name}\\\"',\"region\":\"us-east-1\",\"regionEndpoint\":'\\\"https://USD{route_host}\\\"',\"virtualHostedStyle\":false,\"encrypt\":false,\"trustedCA\":{\"name\":\"image-registry-s3-bundle\"}}}}}' --type=merge", "cat <<EOF | oc apply -f - apiVersion: objectbucket.io/v1alpha1 kind: ObjectBucketClaim metadata: name: noobaatest namespace: openshift-storage 1 spec: storageClassName: openshift-storage.noobaa.io generateBucketName: noobaatest EOF", "bucket_name=USD(oc get obc -n openshift-storage noobaatest -o jsonpath='{.spec.bucketName}')", "AWS_ACCESS_KEY_ID=USD(oc get secret -n openshift-storage noobaatest -o yaml | grep -w \"AWS_ACCESS_KEY_ID:\" | head -n1 | awk '{print USD2}' | base64 --decode)", "AWS_SECRET_ACCESS_KEY=USD(oc get secret -n openshift-storage noobaatest -o yaml | grep -w \"AWS_SECRET_ACCESS_KEY:\" | head -n1 | awk '{print USD2}' | base64 --decode)", "oc create secret generic image-registry-private-configuration-user --from-literal=REGISTRY_STORAGE_S3_ACCESSKEY=USD{AWS_ACCESS_KEY_ID} --from-literal=REGISTRY_STORAGE_S3_SECRETKEY=USD{AWS_SECRET_ACCESS_KEY} --namespace openshift-image-registry", "route_host=USD(oc get route s3 -n openshift-storage -o=jsonpath='{.spec.host}')", "oc extract secret/USD(oc get ingresscontroller -n openshift-ingress-operator default -o json | jq '.spec.defaultCertificate.name // \"router-certs-default\"' -r) -n openshift-ingress --confirm", "oc create configmap image-registry-s3-bundle --from-file=ca-bundle.crt=./tls.crt -n openshift-config", "oc patch config.image/cluster -p 
'{\"spec\":{\"managementState\":\"Managed\",\"replicas\":2,\"storage\":{\"managementState\":\"Unmanaged\",\"s3\":{\"bucket\":'\\\"USD{bucket_name}\\\"',\"region\":\"us-east-1\",\"regionEndpoint\":'\\\"https://USD{route_host}\\\"',\"virtualHostedStyle\":false,\"encrypt\":false,\"trustedCA\":{\"name\":\"image-registry-s3-bundle\"}}}}}' --type=merge", "cat <<EOF | oc apply -f - apiVersion: v1 kind: PersistentVolumeClaim metadata: name: registry-storage-pvc namespace: openshift-image-registry spec: accessModes: - ReadWriteMany resources: requests: storage: 100Gi storageClassName: ocs-storagecluster-cephfs EOF", "oc patch config.image/cluster -p '{\"spec\":{\"managementState\":\"Managed\",\"replicas\":2,\"storage\":{\"managementState\":\"Unmanaged\",\"pvc\":{\"claim\":\"registry-storage-pvc\"}}}}' --type=merge" ]
https://docs.redhat.com/en/documentation/openshift_container_platform/4.18/html/registry/setting-up-and-configuring-the-registry
Chapter 6. Encrypting cluster transport
Chapter 6. Encrypting cluster transport Secure cluster transport so that nodes communicate with encrypted messages. You can also configure Data Grid clusters to perform certificate authentication so that only nodes with valid identities can join. 6.1. Securing cluster transport with TLS identities Add SSL/TLS identities to a Data Grid Server security realm and use them to secure cluster transport. Nodes in the Data Grid Server cluster then exchange SSL/TLS certificates to encrypt JGroups messages, including RELAY messages if you configure cross-site replication. Prerequisites Install a Data Grid Server cluster. Procedure Create a TLS keystore that contains a single certificate to identify Data Grid Server. You can also use a PEM file if it contains a private key in PKCS#1 or PKCS#8 format, a certificate, and has an empty password: password="" . Note If the certificate in the keystore is not signed by a public certificate authority (CA) then you must also create a trust store that contains either the signing certificate or the public key. Add the keystore to the USDRHDG_HOME/server/conf directory. Add the keystore to a new security realm in your Data Grid Server configuration. Important You should create dedicated keystores and security realms so that Data Grid Server endpoints do not use the same security realm as cluster transport. <server xmlns="urn:infinispan:server:15.0"> <security> <security-realms> <security-realm name="cluster-transport"> <server-identities> <ssl> <!-- Adds a keystore that contains a certificate that provides SSL/TLS identity to encrypt cluster transport. --> <keystore path="server.pfx" relative-to="infinispan.server.config.path" password="secret" alias="server"/> </ssl> </server-identities> </security-realm> </security-realms> </security> </server> Configure cluster transport to use the security realm by specifying the name of the security realm with the server:security-realm attribute. <infinispan> <cache-container> <transport server:security-realm="cluster-transport"/> </cache-container> </infinispan> Verification When you start Data Grid Server, the following log message indicates that the cluster is using the security realm for cluster transport: 6.2. JGroups encryption protocols To secure cluster traffic, you can configure Data Grid nodes to encrypt JGroups message payloads with secret keys. Data Grid nodes can obtain secret keys from either: The coordinator node (asymmetric encryption). A shared keystore (symmetric encryption). Retrieving secret keys from coordinator nodes You configure asymmetric encryption by adding the ASYM_ENCRYPT protocol to a JGroups stack in your Data Grid configuration. This allows Data Grid clusters to generate and distribute secret keys. Important When using asymmetric encryption, you should also provide keystores so that nodes can perform certificate authentication and securely exchange secret keys. This protects your cluster from man-in-the-middle (MitM) attacks. Asymmetric encryption secures cluster traffic as follows: The first node in the Data Grid cluster, the coordinator node, generates a secret key. A joining node performs certificate authentication with the coordinator to mutually verify identity. The joining node requests the secret key from the coordinator node. That request includes the public key for the joining node. The coordinator node encrypts the secret key with the public key and returns it to the joining node. The joining node decrypts and installs the secret key. 
The node joins the cluster, encrypting and decrypting messages with the secret key. Retrieving secret keys from shared keystores You configure symmetric encryption by adding the SYM_ENCRYPT protocol to a JGroups stack in your Data Grid configuration. This allows Data Grid clusters to obtain secret keys from keystores that you provide. Nodes install the secret key from a keystore on the Data Grid classpath at startup. Node join clusters, encrypting and decrypting messages with the secret key. Comparison of asymmetric and symmetric encryption ASYM_ENCRYPT with certificate authentication provides an additional layer of encryption in comparison with SYM_ENCRYPT . You provide keystores that encrypt the requests to coordinator nodes for the secret key. Data Grid automatically generates that secret key and handles cluster traffic, while letting you specify when to generate secret keys. For example, you can configure clusters to generate new secret keys when nodes leave. This ensures that nodes cannot bypass certificate authentication and join with old keys. SYM_ENCRYPT , on the other hand, is faster than ASYM_ENCRYPT because nodes do not need to exchange keys with the cluster coordinator. A potential drawback to SYM_ENCRYPT is that there is no configuration to automatically generate new secret keys when cluster membership changes. Users are responsible for generating and distributing the secret keys that nodes use to encrypt cluster traffic. 6.3. Securing cluster transport with asymmetric encryption Configure Data Grid clusters to generate and distribute secret keys that encrypt JGroups messages. Procedure Create a keystore with certificate chains that enables Data Grid to verify node identity. Place the keystore on the classpath for each node in the cluster. For Data Grid Server, you put the keystore in the USDRHDG_HOME directory. Add the SSL_KEY_EXCHANGE and ASYM_ENCRYPT protocols to a JGroups stack in your Data Grid configuration, as in the following example: <infinispan> <jgroups> <!-- Creates a secure JGroups stack named "encrypt-tcp" that extends the default TCP stack. --> <stack name="encrypt-tcp" extends="tcp"> <!-- Adds a keystore that nodes use to perform certificate authentication. --> <!-- Uses the stack.combine and stack.position attributes to insert SSL_KEY_EXCHANGE into the default TCP stack after VERIFY_SUSPECT2. --> <SSL_KEY_EXCHANGE keystore_name="mykeystore.jks" keystore_password="changeit" stack.combine="INSERT_AFTER" stack.position="VERIFY_SUSPECT2"/> <!-- Configures ASYM_ENCRYPT --> <!-- Uses the stack.combine and stack.position attributes to insert ASYM_ENCRYPT into the default TCP stack before pbcast.NAKACK2. --> <!-- The use_external_key_exchange = "true" attribute configures nodes to use the `SSL_KEY_EXCHANGE` protocol for certificate authentication. --> <ASYM_ENCRYPT asym_keylength="2048" asym_algorithm="RSA" change_key_on_coord_leave = "false" change_key_on_leave = "false" use_external_key_exchange = "true" stack.combine="INSERT_BEFORE" stack.position="pbcast.NAKACK2"/> </stack> </jgroups> <cache-container name="default" statistics="true"> <!-- Configures the cluster to use the JGroups stack. 
--> <transport cluster="USD{infinispan.cluster.name}" stack="encrypt-tcp" node-name="USD{infinispan.node.name:}"/> </cache-container> </infinispan> Verification When you start your Data Grid cluster, the following log message indicates that the cluster is using the secure JGroups stack: Data Grid nodes can join the cluster only if they use ASYM_ENCRYPT and can obtain the secret key from the coordinator node. Otherwise the following message is written to Data Grid logs: Additional resources JGroups 4 Manual JGroups 4.2 Schema 6.4. Securing cluster transport with symmetric encryption Configure Data Grid clusters to encrypt JGroups messages with secret keys from keystores that you provide. Procedure Create a keystore that contains a secret key. Place the keystore on the classpath for each node in the cluster. For Data Grid Server, you put the keystore in the USDRHDG_HOME directory. Add the SYM_ENCRYPT protocol to a JGroups stack in your Data Grid configuration. <infinispan> <jgroups> <!-- Creates a secure JGroups stack named "encrypt-tcp" that extends the default TCP stack. --> <stack name="encrypt-tcp" extends="tcp"> <!-- Adds a keystore from which nodes obtain secret keys. --> <!-- Uses the stack.combine and stack.position attributes to insert SYM_ENCRYPT into the default TCP stack after VERIFY_SUSPECT2. --> <SYM_ENCRYPT keystore_name="myKeystore.p12" keystore_type="PKCS12" store_password="changeit" key_password="changeit" alias="myKey" stack.combine="INSERT_AFTER" stack.position="VERIFY_SUSPECT2"/> </stack> </jgroups> <cache-container name="default" statistics="true"> <!-- Configures the cluster to use the JGroups stack. --> <transport cluster="USD{infinispan.cluster.name}" stack="encrypt-tcp" node-name="USD{infinispan.node.name:}"/> </cache-container> </infinispan> Verification When you start your Data Grid cluster, the following log message indicates that the cluster is using the secure JGroups stack: Data Grid nodes can join the cluster only if they use SYM_ENCRYPT and can obtain the secret key from the shared keystore. Otherwise the following message is written to Data Grid logs: Additional resources JGroups 4 Manual JGroups 4.2 Schema
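Both procedures in this chapter assume that the referenced keystores already exist. The following commands are a minimal sketch of one way to create them with the JDK keytool; the file names, keystore types, and changeit passwords match the example configurations above, while the jgroups alias and the CN=cluster-transport subject are arbitrary placeholders. A production cluster should use a CA-signed certificate chain and stronger passwords.

# Keystore with a key pair for certificate authentication (SSL_KEY_EXCHANGE / ASYM_ENCRYPT).
# Self-signed here for brevity; import a CA-signed chain for production use.
keytool -genkeypair -alias jgroups -keyalg RSA -keysize 2048 -validity 365 \
  -dname "CN=cluster-transport" \
  -keystore mykeystore.jks -storetype JKS \
  -storepass changeit -keypass changeit

# PKCS12 keystore holding an AES secret key for SYM_ENCRYPT.
# The alias and passwords must match the SYM_ENCRYPT attributes in the stack.
# (With older JDKs you might need -storetype JCEKS and a matching keystore_type.)
keytool -genseckey -alias myKey -keyalg AES -keysize 128 \
  -keystore myKeystore.p12 -storetype PKCS12 \
  -storepass changeit -keypass changeit

Place the resulting files on the classpath of every node, as described in the procedures above.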
[ "<server xmlns=\"urn:infinispan:server:15.0\"> <security> <security-realms> <security-realm name=\"cluster-transport\"> <server-identities> <ssl> <!-- Adds a keystore that contains a certificate that provides SSL/TLS identity to encrypt cluster transport. --> <keystore path=\"server.pfx\" relative-to=\"infinispan.server.config.path\" password=\"secret\" alias=\"server\"/> </ssl> </server-identities> </security-realm> </security-realms> </security> </server>", "<infinispan> <cache-container> <transport server:security-realm=\"cluster-transport\"/> </cache-container> </infinispan>", "[org.infinispan.SERVER] ISPN080060: SSL Transport using realm <security_realm_name>", "<infinispan> <jgroups> <!-- Creates a secure JGroups stack named \"encrypt-tcp\" that extends the default TCP stack. --> <stack name=\"encrypt-tcp\" extends=\"tcp\"> <!-- Adds a keystore that nodes use to perform certificate authentication. --> <!-- Uses the stack.combine and stack.position attributes to insert SSL_KEY_EXCHANGE into the default TCP stack after VERIFY_SUSPECT2. --> <SSL_KEY_EXCHANGE keystore_name=\"mykeystore.jks\" keystore_password=\"changeit\" stack.combine=\"INSERT_AFTER\" stack.position=\"VERIFY_SUSPECT2\"/> <!-- Configures ASYM_ENCRYPT --> <!-- Uses the stack.combine and stack.position attributes to insert ASYM_ENCRYPT into the default TCP stack before pbcast.NAKACK2. --> <!-- The use_external_key_exchange = \"true\" attribute configures nodes to use the `SSL_KEY_EXCHANGE` protocol for certificate authentication. --> <ASYM_ENCRYPT asym_keylength=\"2048\" asym_algorithm=\"RSA\" change_key_on_coord_leave = \"false\" change_key_on_leave = \"false\" use_external_key_exchange = \"true\" stack.combine=\"INSERT_BEFORE\" stack.position=\"pbcast.NAKACK2\"/> </stack> </jgroups> <cache-container name=\"default\" statistics=\"true\"> <!-- Configures the cluster to use the JGroups stack. --> <transport cluster=\"USD{infinispan.cluster.name}\" stack=\"encrypt-tcp\" node-name=\"USD{infinispan.node.name:}\"/> </cache-container> </infinispan>", "[org.infinispan.CLUSTER] ISPN000078: Starting JGroups channel cluster with stack <encrypted_stack_name>", "[org.jgroups.protocols.ASYM_ENCRYPT] <hostname>: received message without encrypt header from <hostname>; dropping it", "<infinispan> <jgroups> <!-- Creates a secure JGroups stack named \"encrypt-tcp\" that extends the default TCP stack. --> <stack name=\"encrypt-tcp\" extends=\"tcp\"> <!-- Adds a keystore from which nodes obtain secret keys. --> <!-- Uses the stack.combine and stack.position attributes to insert SYM_ENCRYPT into the default TCP stack after VERIFY_SUSPECT2. --> <SYM_ENCRYPT keystore_name=\"myKeystore.p12\" keystore_type=\"PKCS12\" store_password=\"changeit\" key_password=\"changeit\" alias=\"myKey\" stack.combine=\"INSERT_AFTER\" stack.position=\"VERIFY_SUSPECT2\"/> </stack> </jgroups> <cache-container name=\"default\" statistics=\"true\"> <!-- Configures the cluster to use the JGroups stack. --> <transport cluster=\"USD{infinispan.cluster.name}\" stack=\"encrypt-tcp\" node-name=\"USD{infinispan.node.name:}\"/> </cache-container> </infinispan>", "[org.infinispan.CLUSTER] ISPN000078: Starting JGroups channel cluster with stack <encrypted_stack_name>", "[org.jgroups.protocols.SYM_ENCRYPT] <hostname>: received message without encrypt header from <hostname>; dropping it" ]
https://docs.redhat.com/en/documentation/red_hat_data_grid/8.5/html/data_grid_security_guide/secure-cluster-transport
Chapter 13. Installing a three-node cluster on GCP
Chapter 13. Installing a three-node cluster on GCP In OpenShift Container Platform version 4.18, you can install a three-node cluster on Google Cloud Platform (GCP). A three-node cluster consists of three control plane machines, which also act as compute machines. This type of cluster provides a smaller, more resource-efficient cluster for cluster administrators and developers to use for testing, development, and production. You can install a three-node cluster using either installer-provisioned or user-provisioned infrastructure. 13.1. Configuring a three-node cluster You configure a three-node cluster by setting the number of worker nodes to 0 in the install-config.yaml file before deploying the cluster. Setting the number of worker nodes to 0 ensures that the control plane machines are schedulable. This allows application workloads to be scheduled to run from the control plane nodes. Note Because application workloads run from control plane nodes, additional subscriptions are required, as the control plane nodes are considered to be compute nodes. Prerequisites You have an existing install-config.yaml file. Procedure Set the number of compute replicas to 0 in your install-config.yaml file, as shown in the following compute stanza: Example install-config.yaml file for a three-node cluster apiVersion: v1 baseDomain: example.com compute: - name: worker platform: {} replicas: 0 # ... If you are deploying a cluster with user-provisioned infrastructure: After you create the Kubernetes manifest files, make sure that the spec.mastersSchedulable parameter is set to true in the cluster-scheduler-02-config.yml file. You can locate this file in <installation_directory>/manifests . For more information, see "Creating the Kubernetes manifest and Ignition config files" in "Installing a cluster on user-provisioned infrastructure in GCP by using Deployment Manager templates". Do not create additional worker nodes. Example cluster-scheduler-02-config.yml file for a three-node cluster apiVersion: config.openshift.io/v1 kind: Scheduler metadata: creationTimestamp: null name: cluster spec: mastersSchedulable: true policy: name: "" status: {} 13.2. Next steps Installing a cluster on GCP with customizations Installing a cluster on user-provisioned infrastructure in GCP by using Deployment Manager templates
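To confirm the configuration described in this chapter, the commands below are a minimal sketch; they assume the install-config.yaml and <installation_directory> placeholders shown above, that you have already run openshift-install create manifests for the user-provisioned case, and that the cluster is reachable with oc after installation.

# Before deployment: check that compute replicas are 0 and that the generated
# scheduler manifest allows workloads on the control plane nodes.
grep -A6 '^compute:' install-config.yaml
grep mastersSchedulable <installation_directory>/manifests/cluster-scheduler-02-config.yml

# After installation: the three control plane nodes should also carry the worker role,
# and the cluster Scheduler resource should report mastersSchedulable as true.
oc get nodes
oc get scheduler cluster -o jsonpath='{.spec.mastersSchedulable}{"\n"}'

In the post-installation output, each node listing both control plane and worker roles confirms that application workloads can be scheduled on the control plane machines.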
[ "apiVersion: v1 baseDomain: example.com compute: - name: worker platform: {} replicas: 0", "apiVersion: config.openshift.io/v1 kind: Scheduler metadata: creationTimestamp: null name: cluster spec: mastersSchedulable: true policy: name: \"\" status: {}" ]
https://docs.redhat.com/en/documentation/openshift_container_platform/4.18/html/installing_on_gcp/installing-gcp-three-node
Managing hybrid and multicloud resources
Managing hybrid and multicloud resources Red Hat OpenShift Data Foundation 4.16 Instructions for how to manage storage resources across a hybrid cloud or multicloud environment using the Multicloud Object Gateway (NooBaa). Red Hat Storage Documentation Team Abstract This document explains how to manage storage resources across a hybrid cloud or multicloud environment. Making open source more inclusive Red Hat is committed to replacing problematic language in our code, documentation, and web properties. We are beginning with these four terms: master, slave, blacklist, and whitelist. Because of the enormity of this endeavor, these changes will be implemented gradually over several upcoming releases. For more details, see our CTO Chris Wright's message . Providing feedback on Red Hat documentation We appreciate your input on our documentation. Do let us know how we can make it better. To give feedback, create a Bugzilla ticket: Go to the Bugzilla website. In the Component section, choose documentation . Fill in the Description field with your suggestion for improvement. Include a link to the relevant part(s) of documentation. Click Submit Bug . Chapter 1. About the Multicloud Object Gateway The Multicloud Object Gateway (MCG) is a lightweight object storage service for OpenShift, allowing users to start small and then scale as needed on-premise, in multiple clusters, and with cloud-native storage. Chapter 2. Accessing the Multicloud Object Gateway with your applications You can access the object service with any application targeting AWS S3 or code that uses AWS S3 Software Development Kit (SDK). Applications need to specify the Multicloud Object Gateway (MCG) endpoint, an access key, and a secret access key. You can use your terminal or the MCG CLI to retrieve this information. For information on accessing the RADOS Object Gateway (RGW) S3 endpoint, see Accessing the RADOS Object Gateway S3 endpoint . Prerequisites A running OpenShift Data Foundation Platform. 2.1. Accessing the Multicloud Object Gateway from the terminal Procedure Run the describe command to view information about the Multicloud Object Gateway (MCG) endpoint, including its access key ( AWS_ACCESS_KEY_ID value) and secret access key ( AWS_SECRET_ACCESS_KEY value). The output will look similar to the following: 1 access key ( AWS_ACCESS_KEY_ID value) 2 secret access key ( AWS_SECRET_ACCESS_KEY value) 3 MCG endpoint Note The output from the oc describe noobaa command lists the internal and external DNS names that are available. When using the internal DNS, the traffic is free. The external DNS uses Load Balancing to process the traffic, and therefore has a cost per hour. 2.2. Accessing the Multicloud Object Gateway from the MCG command-line interface Prerequisites Download the Multicloud Object Gateway (MCG) command-line interface binary from the customer portal and make it executable. Note Choose the correct product variant according to your architecture. Available platforms are Linux(x86_64), Windows, and Mac OS. Procedure Run the status command to access the endpoint, access key, and secret access key: The output will look similar to the following: 1 endpoint 2 access key 3 secret access key You have the relevant endpoint, access key, and secret access key in order to connect to your applications. For example: If AWS S3 CLI is the application, the following command will list the buckets in OpenShift Data Foundation: 2.3. 
Support of Multicloud Object Gateway data bucket APIs The following table lists the Multicloud Object Gateway (MCG) data bucket APIs and their support levels. Data buckets Support List buckets Supported Delete bucket Supported Replication configuration is part of MCG bucket class configuration Create bucket Supported A different set of canned ACLs Post bucket Not supported Put bucket Partially supported Replication configuration is part of MCG bucket class configuration Bucket lifecycle Partially supported Object expiration only Policy (Buckets, Objects) Partially supported Bucket policies are supported Bucket Website Supported Bucket ACLs (Get, Put) Supported A different set of canned ACLs Bucket Location Partially supported Returns a default value only Bucket Notification Not supported Bucket Object Versions Supported Get Bucket Info (HEAD) Supported Bucket Request Payment Partially supported Returns the bucket owner Put Object Supported Delete Object Supported Get Object Supported Object ACLs (Get, Put) Supported Get Object Info (HEAD) Supported POST Object Supported Copy Object Supported Multipart Uploads Supported Object Tagging Supported Storage Class Not supported Note No support for cors, metrics, inventory, analytics, logging, notifications, accelerate, replication, request payment, locks verbs Chapter 3. Adding storage resources for hybrid or Multicloud 3.1. Creating a new backing store Use this procedure to create a new backing store in OpenShift Data Foundation. Prerequisites Administrator access to OpenShift Data Foundation. Procedure In the OpenShift Web Console, click Storage -> Object Storage . Click the Backing Store tab. Click Create Backing Store . On the Create New Backing Store page, perform the following: Enter a Backing Store Name . Select a Provider . Select a Region . Optional: Enter an Endpoint . Select a Secret from the drop-down list, or create your own secret. Optionally, you can Switch to Credentials view which lets you fill in the required secrets. For more information on creating an OCP secret, see the section Creating the secret in the OpenShift Container Platform documentation. Each backingstore requires a different secret. For more information on creating the secret for a particular backingstore, see Section 3.3, "Adding storage resources for hybrid or Multicloud using the MCG command line interface" and follow the procedure for the addition of storage resources using a YAML. Note This menu is relevant for all providers except Google Cloud and local PVC. Enter the Target bucket . The target bucket is a storage container that is hosted on the remote cloud service. It allows you to create a connection that tells the MCG that it can use this bucket for the system. Click Create Backing Store . Verification steps In the OpenShift Web Console, click Storage -> Object Storage . Click the Backing Store tab to view all the backing stores. 3.2. Overriding the default backing store You can use the manualDefaultBackingStore flag to override the default NooBaa backing store and remove it if you do not want to use the default backing store configuration. This provides flexibility to customize your backing store configuration and tailor it to your specific needs. By leveraging this feature, you can further optimize your system and enhance its performance. Prerequisites OpenShift Container Platform with OpenShift Data Foundation operator installed.
Download the Multicloud Object Gateway (MCG) command-line interface binary from the customer portal and make it executable. Note Choose the correct product variant according to your architecture. Available platforms are Linux(x86_64), Windows, and Mac OS. Procedure Check if noobaa-default-backing-store is present: Patch the NooBaa CR to enable manualDefaultBackingStore : Important Use the Multicloud Object Gateway CLI to create a new backing store and update accounts. Create a new default backing store to override the default backing store. For example: Replace NEW-DEFAULT-BACKING-STORE with the name you want for your new default backing store. Update the admin account to use the new default backing store as its default resource: Replace NEW-DEFAULT-BACKING-STORE with the name of the backing store from the step. Updating the default resource for admin accounts ensures that the new configuration is used throughout your system. Configure the default-bucketclass to use the new default backingstore: Optional: Delete the noobaa-default-backing-store. Delete all instances of and buckets associated with noobaa-default-backing-store and update the accounts using it as resource. Delete the noobaa-default-backing-store: You must enable the manualDefaultBackingStore flag before proceeding. Additionally, it is crucial to update all accounts that use the default resource and delete all instances of and buckets associated with the default backing store to ensure a smooth transition. 3.3. Adding storage resources for hybrid or Multicloud using the MCG command line interface The Multicloud Object Gateway (MCG) simplifies the process of spanning data across the cloud provider and clusters. Add a backing storage that can be used by the MCG. Depending on the type of your deployment, you can choose one of the following procedures to create a backing storage: For creating an AWS-backed backingstore, see Section 3.3.1, "Creating an AWS-backed backingstore" For creating an AWS-STS-backed backingstore, see Section 3.3.2, "Creating an AWS-STS-backed backingstore" For creating an IBM COS-backed backingstore, see Section 3.3.3, "Creating an IBM COS-backed backingstore" For creating an Azure-backed backingstore, see Section 3.3.4, "Creating an Azure-backed backingstore" For creating a GCP-backed backingstore, see Section 3.3.5, "Creating a GCP-backed backingstore" For creating a local Persistent Volume-backed backingstore, see Section 3.3.6, "Creating a local Persistent Volume-backed backingstore" For VMware deployments, skip to Section 3.4, "Creating an s3 compatible Multicloud Object Gateway backingstore" for further instructions. 3.3.1. Creating an AWS-backed backingstore Prerequisites Download the Multicloud Object Gateway (MCG) command-line interface binary from the customer portal and make it executable. Note Choose the correct product variant according to your architecture. Available platforms are Linux(x86_64), Windows, and Mac OS. Procedure Using MCG command-line interface From the MCG command-line interface, run the following command: <backingstore_name> The name of the backingstore. <AWS ACCESS KEY> and <AWS SECRET ACCESS KEY> The AWS access key ID and secret access key you created for this purpose. <bucket-name> The existing AWS bucket name. This argument indicates to the MCG which bucket to use as a target bucket for its backing store, and subsequently, data storage and administration. 
The output will be similar to the following: Adding storage resources using a YAML Create a secret with the credentials: <AWS ACCESS KEY> and <AWS SECRET ACCESS KEY> Supply and encode your own AWS access key ID and secret access key using Base64, and use the results for <AWS ACCESS KEY ID ENCODED IN BASE64> and <AWS SECRET ACCESS KEY ENCODED IN BASE64> . <backingstore-secret-name> The name of the backingstore secret created in the step. Apply the following YAML for a specific backing store: <bucket-name> The existing AWS bucket name. <backingstore-secret-name> The name of the backingstore secret created in the step. 3.3.2. Creating an AWS-STS-backed backingstore Amazon Web Services Security Token Service (AWS STS) is an AWS feature and it is a way to authenticate using short-lived credentials. Creating an AWS-STS-backed backingstore involves the following: Creating an AWS role using a script, which helps to get the temporary security credentials for the role session Installing OpenShift Data Foundation operator in AWS STS OpenShift cluster Creating backingstore in AWS STS OpenShift cluster 3.3.2.1. Creating an AWS role using a script You need to create a role and pass the role Amazon resource name (ARN) while installing the OpenShift Data Foundation operator. Prerequisites Configure Red Hat OpenShift Container Platform cluster with AWS STS. For more information, see Configuring an AWS cluster to use short-term credentials . Procedure Create an AWS role using a script that matches OpenID Connect (OIDC) configuration for Multicloud Object Gateway (MCG) on OpenShift Data Foundation. The following example shows the details that are required to create the role: where 123456789123 Is the AWS account ID mybucket Is the bucket name (using public bucket configuration) us-east-2 Is the AWS region openshift-storage Is the namespace name Sample script 3.3.2.2. Installing OpenShift Data Foundation operator in AWS STS OpenShift cluster Prerequisites Configure Red Hat OpenShift Container Platform cluster with AWS STS. For more information, see Configuring an AWS cluster to use short-term credentials . Create an AWS role using a script that matches OpenID Connect (OIDC) configuration. For more information, see Creating an AWS role using a script . Procedure Install OpenShift Data Foundation Operator from the Operator Hub. During the installation add the role ARN in the ARN Details field. Make sure that the Update approval field is set to Manual . 3.3.2.3. Creating a new AWS STS backingstore Prerequisites Configure Red Hat OpenShift Container Platform cluster with AWS STS. For more information, see Configuring an AWS cluster to use short-term credentials . Create an AWS role using a script that matches OpenID Connect (OIDC) configuration. For more information, see Creating an AWS role using a script . Install OpenShift Data Foundation Operator. For more information, see Installing OpenShift Data Foundation operator in AWS STS OpenShift cluster . Procedure Install Multicloud Object Gateway (MCG). It is installed with the default backingstore by using the short-lived credentials. After the MCG system is ready, you can create more backingstores of the type aws-sts-s3 using the following MCG command line interface command: where backingstore-name Name of the backingstore aws-sts-role-arn The AWS STS role ARN which will assume role region The AWS bucket region target-bucket The target bucket name on the cloud 3.3.3. 
Creating an IBM COS-backed backingstore Prerequisites Download the Multicloud Object Gateway (MCG) command-line interface binary from the customer portal and make it executable. Note Choose the correct product variant according to your architecture. Available platforms are Linux(x86_64), Windows, and Mac OS. Procedure Using command-line interface From the MCG command-line interface, run the following command: <backingstore_name> The name of the backingstore. <IBM ACCESS KEY> , <IBM SECRET ACCESS KEY> , and <IBM COS ENDPOINT> An IBM access key ID, secret access key and the appropriate regional endpoint that corresponds to the location of the existing IBM bucket. To generate the above keys on IBM cloud, you must include HMAC credentials while creating the service credentials for your target bucket. <bucket-name> An existing IBM bucket name. This argument indicates MCG about the bucket to use as a target bucket for its backing store, and subsequently, data storage and administration. The output will be similar to the following: Adding storage resources using an YAML Create a secret with the credentials: <IBM COS ACCESS KEY ID ENCODED IN BASE64> and <IBM COS SECRET ACCESS KEY ENCODED IN BASE64> Provide and encode your own IBM COS access key ID and secret access key using Base64, and use the results in place of these attributes respectively. <backingstore-secret-name> The name of the backingstore secret. Apply the following YAML for a specific backing store: <bucket-name> an existing IBM COS bucket name. This argument indicates to MCG about the bucket to use as a target bucket for its backingstore, and subsequently, data storage and administration. <endpoint> A regional endpoint that corresponds to the location of the existing IBM bucket name. This argument indicates to MCG about the endpoint to use for its backingstore, and subsequently, data storage and administration. <backingstore-secret-name> The name of the secret created in the step. 3.3.4. Creating an Azure-backed backingstore Prerequisites Download the Multicloud Object Gateway (MCG) command-line interface binary from the customer portal and make it executable. Note Choose the correct product variant according to your architecture. Available platforms are Linux(x86_64), Windows, and Mac OS. Procedure Using the MCG command-line interface From the MCG command-line interface, run the following command: <backingstore_name> The name of the backingstore. <AZURE ACCOUNT KEY> and <AZURE ACCOUNT NAME> An AZURE account key and account name you created for this purpose. <blob container name> An existing Azure blob container name. This argument indicates to MCG about the bucket to use as a target bucket for its backingstore, and subsequently, data storage and administration. The output will be similar to the following: Adding storage resources using a YAML Create a secret with the credentials: <AZURE ACCOUNT NAME ENCODED IN BASE64> and <AZURE ACCOUNT KEY ENCODED IN BASE64> Supply and encode your own Azure Account Name and Account Key using Base64, and use the results in place of these attributes respectively. <backingstore-secret-name> A unique name of backingstore secret. Apply the following YAML for a specific backing store: <blob-container-name> An existing Azure blob container name. This argument indicates to the MCG about the bucket to use as a target bucket for its backingstore, and subsequently, data storage and administration. <backingstore-secret-name> with the name of the secret created in the step. 3.3.5. 
Creating a GCP-backed backingstore Prerequisites Download the Multicloud Object Gateway (MCG) command-line interface binary from the customer portal and make it executable. Note Choose the correct product variant according to your architecture. Available platforms are Linux(x86_64), Windows, and Mac OS. Procedure Using the MCG command-line interface From the MCG command-line interface, run the following command: <backingstore_name> Name of the backingstore. <PATH TO GCP PRIVATE KEY JSON FILE> A path to your GCP private key created for this purpose. <GCP bucket name> An existing GCP object storage bucket name. This argument tells the MCG which bucket to use as a target bucket for its backing store, and subsequently, data storage and administration. The output will be similar to the following: Adding storage resources using a YAML Create a secret with the credentials: <GCP PRIVATE KEY ENCODED IN BASE64> Provide and encode your own GCP service account private key using Base64, and use the results for this attribute. <backingstore-secret-name> A unique name of the backingstore secret. Apply the following YAML for a specific backing store: <target bucket> An existing Google storage bucket. This argument indicates to the MCG about the bucket to use as a target bucket for its backing store, and subsequently, data storage and administration. <backingstore-secret-name> The name of the secret created in the step. 3.3.6. Creating a local Persistent Volume-backed backingstore Prerequisites Download the Multicloud Object Gateway (MCG) command-line interface binary from the customer portal and make it executable. Note Choose the correct product variant according to your architecture. Available platforms are Linux(x86_64), Windows, and Mac OS. Procedure Adding storage resources using the MCG command-line interface From the MCG command-line interface, run the following command: Note This command must be run from within the openshift-storage namespace. Adding storage resources using YAML Apply the following YAML for a specific backing store: <backingstore_name> The name of the backingstore. <NUMBER OF VOLUMES> The number of volumes you would like to create. Note that increasing the number of volumes scales up the storage. <VOLUME SIZE> Required size in GB of each volume. <CPU REQUEST> Guaranteed amount of CPU requested in CPU unit m . <MEMORY REQUEST> Guaranteed amount of memory requested. <CPU LIMIT> Maximum amount of CPU that can be consumed in CPU unit m . <MEMORY LIMIT> Maximum amount of memory that can be consumed. <LOCAL STORAGE CLASS> The local storage class name, recommended to use ocs-storagecluster-ceph-rbd . The output will be similar to the following: 3.4. Creating an s3 compatible Multicloud Object Gateway backingstore The Multicloud Object Gateway (MCG) can use any S3 compatible object storage as a backing store, for example, Red Hat Ceph Storage's RADOS Object Gateway (RGW). The following procedure shows how to create an S3 compatible MCG backing store for Red Hat Ceph Storage's RGW. Note that when the RGW is deployed, OpenShift Data Foundation operator creates an S3 compatible backingstore for MCG automatically. Procedure From the MCG command-line interface, run the following command: Note This command must be run from within the openshift-storage namespace. To get the <RGW ACCESS KEY> and <RGW SECRET KEY> , run the following command using your RGW user secret name: Decode the access key ID and the access key from Base64 and keep them.
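As an illustration, one way to read the RGW user secret and decode the keys might look like the following. The secret name placeholder and the exact data field names are assumptions based on typical Rook-created RGW user secrets, so adjust them to match your environment:

$ oc get secret <RGW USER SECRET NAME> -n openshift-storage -o yaml
# Decode the Base64 values found under the data section, for example:
$ echo '<base64-encoded-access-key>' | base64 -d
$ echo '<base64-encoded-secret-key>' | base64 -d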
Replace <RGW USER ACCESS KEY> and <RGW USER SECRET ACCESS KEY> with the appropriate, decoded data from the step. Replace <bucket-name> with an existing RGW bucket name. This argument tells the MCG which bucket to use as a target bucket for its backing store, and subsequently, data storage and administration. To get the <RGW endpoint> , see Accessing the RADOS Object Gateway S3 endpoint . The output will be similar to the following: You can also create the backingstore using a YAML: Create a CephObjectStore user. This also creates a secret containing the RGW credentials: Replace <RGW-Username> and <Display-name> with a unique username and display name. Apply the following YAML for an S3-Compatible backing store: Replace <backingstore-secret-name> with the name of the secret that was created with CephObjectStore in the step. Replace <bucket-name> with an existing RGW bucket name. This argument tells the MCG which bucket to use as a target bucket for its backing store, and subsequently, data storage and administration. To get the <RGW endpoint> , see Accessing the RADOS Object Gateway S3 endpoint . 3.5. Creating a new bucket class Bucket class is a CRD representing a class of buckets that defines tiering policies and data placements for an Object Bucket Class. Use this procedure to create a bucket class in OpenShift Data Foundation. Procedure In the OpenShift Web Console, click Storage -> Object Storage . Click the Bucket Class tab. Click Create Bucket Class . On the Create new Bucket Class page, perform the following: Select the bucket class type and enter a bucket class name. Select the BucketClass type . Choose one of the following options: Standard : data will be consumed by a Multicloud Object Gateway (MCG), deduped, compressed and encrypted. Namespace : data is stored on the NamespaceStores without performing de-duplication, compression or encryption. By default, Standard is selected. Enter a Bucket Class Name . Click . In Placement Policy , select Tier 1 - Policy Type and click . You can choose either one of the options as per your requirements. Spread allows spreading of the data across the chosen resources. Mirror allows full duplication of the data across the chosen resources. Click Add Tier to add another policy tier. Select at least one Backing Store resource from the available list if you have selected Tier 1 - Policy Type as Spread and click . Alternatively, you can also create a new backing store . Note You need to select at least 2 backing stores when you select Policy Type as Mirror in step. Review and confirm Bucket Class settings. Click Create Bucket Class . Verification steps In the OpenShift Web Console, click Storage -> Object Storage . Click the Bucket Class tab and search the new Bucket Class. 3.6. Editing a bucket class Use the following procedure to edit the bucket class components through the YAML file by clicking the edit button on the Openshift web console. Prerequisites Administrator access to OpenShift Web Console. Procedure In the OpenShift Web Console, click Storage -> Object Storage . Click the Bucket Class tab. Click the Action Menu (...) to the Bucket class you want to edit. Click Edit Bucket Class . You are redirected to the YAML file, make the required changes in this file and click Save . 3.7. Editing backing stores for bucket class Use the following procedure to edit an existing Multicloud Object Gateway (MCG) bucket class to change the underlying backing stores used in a bucket class. Prerequisites Administrator access to OpenShift Web Console. 
A bucket class. Backing stores. Procedure In the OpenShift Web Console, click Storage -> Object Storage . Click the Bucket Class tab. Click the Action Menu (...) to the Bucket class you want to edit. Click Edit Bucket Class Resources . On the Edit Bucket Class Resources page, edit the bucket class resources either by adding a backing store to the bucket class or by removing a backing store from the bucket class. You can also edit bucket class resources created with one or two tiers and different placement policies. To add a backing store to the bucket class, select the name of the backing store. To remove a backing store from the bucket class, uncheck the name of the backing store. Click Save . Chapter 4. Managing namespace buckets Namespace buckets let you connect data repositories on different providers together, so that you can interact with all of your data through a single unified view. Add the object bucket associated with each provider to the namespace bucket, and access your data through the namespace bucket to see all of your object buckets at once. This lets you write to your preferred storage provider while reading from multiple other storage providers, greatly reducing the cost of migrating to a new storage provider. Note A namespace bucket can only be used if its write target is available and functional. 4.1. Amazon S3 API endpoints for objects in namespace buckets You can interact with objects in the namespace buckets using the Amazon Simple Storage Service (S3) API. Ensure that the credentials provided for the Multicloud Object Gateway (MCG) enables you to perform the AWS S3 namespace bucket operations. You can use the AWS tool, aws-cli to verify that all the operations can be performed on the target bucket. Also, the list bucket which is using this MCG account shows the target bucket. Red Hat OpenShift Data Foundation supports the following namespace bucket operations: ListBuckets ListObjects ListMultipartUploads ListObjectVersions GetObject HeadObject CopyObject PutObject CreateMultipartUpload UploadPartCopy UploadPart ListParts AbortMultipartUpload PubObjectTagging DeleteObjectTagging GetObjectTagging GetObjectAcl PutObjectAcl DeleteObject DeleteObjects See the Amazon S3 API reference documentation for the most up-to-date information about these operations and how to use them. Additional resources Amazon S3 REST API Reference Amazon S3 CLI Reference 4.2. Adding a namespace bucket using the Multicloud Object Gateway CLI and YAML For more information about namespace buckets, see Managing namespace buckets . Depending on the type of your deployment and whether you want to use YAML or the Multicloud Object Gateway (MCG) CLI, choose one of the following procedures to add a namespace bucket: Adding an AWS S3 namespace bucket using YAML Adding an IBM COS namespace bucket using YAML Adding an AWS S3 namespace bucket using the Multicloud Object Gateway CLI Adding an IBM COS namespace bucket using the Multicloud Object Gateway CLI 4.2.1. Adding an AWS S3 namespace bucket using YAML Prerequisites Openshift Container Platform with OpenShift Data Foundation operator installed. Access to the Multicloud Object Gateway (MCG). For information, see Chapter 2, Accessing the Multicloud Object Gateway with your applications . Procedure Create a secret with the credentials: where <namespacestore-secret-name> is a unique NamespaceStore name. 
You must provide and encode your own AWS access key ID and secret access key using Base64 , and use the results in place of <AWS ACCESS KEY ID ENCODED IN BASE64> and <AWS SECRET ACCESS KEY ENCODED IN BASE64> . Create a NamespaceStore resource using OpenShift custom resource definitions (CRDs). A NamespaceStore represents underlying storage to be used as a read or write target for the data in the MCG namespace buckets. To create a NamespaceStore resource, apply the following YAML: <resource-name> The name you want to give to the resource. <namespacestore-secret-name> The secret created in the step. <namespace-secret> The namespace where the secret can be found. <target-bucket> The target bucket you created for the NamespaceStore. Create a namespace bucket class that defines a namespace policy for the namespace buckets. The namespace policy requires a type of either single or multi . A namespace policy of type single requires the following configuration: <my-bucket-class> The unique namespace bucket class name. <resource> The name of a single NamespaceStore that defines the read and write target of the namespace bucket. A namespace policy of type multi requires the following configuration: <my-bucket-class> A unique bucket class name. <write-resource> The name of a single NamespaceStore that defines the write target of the namespace bucket. <read-resources> A list of the names of the NamespaceStores that defines the read targets of the namespace bucket. Create a bucket using an Object Bucket Class (OBC) resource that uses the bucket class defined in the earlier step using the following YAML: <resource-name> The name you want to give to the resource. <my-bucket> The name you want to give to the bucket. <my-bucket-class> The bucket class created in the step. After the OBC is provisioned by the operator, a bucket is created in the MCG, and the operator creates a Secret and ConfigMap with the same name and in the same namespace as that of the OBC. 4.2.2. Adding an IBM COS namespace bucket using YAML Prerequisites Openshift Container Platform with OpenShift Data Foundation operator installed. Access to the Multicloud Object Gateway (MCG), see Chapter 2, Accessing the Multicloud Object Gateway with your applications . Procedure Create a secret with the credentials: <namespacestore-secret-name> A unique NamespaceStore name. You must provide and encode your own IBM COS access key ID and secret access key using Base64 , and use the results in place of <IBM COS ACCESS KEY ID ENCODED IN BASE64> and <IBM COS SECRET ACCESS KEY ENCODED IN BASE64> . Create a NamespaceStore resource using OpenShift custom resource definitions (CRDs). A NamespaceStore represents underlying storage to be used as a read or write target for the data in the MCG namespace buckets. To create a NamespaceStore resource, apply the following YAML: <IBM COS ENDPOINT> The appropriate IBM COS endpoint. <namespacestore-secret-name> The secret created in the step. <namespace-secret> The namespace where the secret can be found. <target-bucket> The target bucket you created for the NamespaceStore. Create a namespace bucket class that defines a namespace policy for the namespace buckets. The namespace policy requires a type of either single or multi . The namespace policy of type single requires the following configuration: <my-bucket-class> The unique namespace bucket class name. <resource> The name of a single NamespaceStore that defines the read and write target of the namespace bucket. 
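As a hedged sketch, a single-type namespace bucket class might be expressed with a BucketClass custom resource similar to the following. The noobaa.io/v1alpha1 API group and the namespacePolicy field layout are assumptions based on the NooBaa operator CRDs and may vary between versions:

$ cat <<EOF | oc apply -f -
apiVersion: noobaa.io/v1alpha1
kind: BucketClass
metadata:
  name: <my-bucket-class>
  namespace: openshift-storage
spec:
  namespacePolicy:
    type: Single
    single:
      resource: <resource>
EOF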
The namespace policy of type multi requires the following configuration: <my-bucket-class> The unique bucket class name. <write-resource> The name of a single NamespaceStore that defines the write target of the namespace bucket. <read-resources> A list of the NamespaceStores names that defines the read targets of the namespace bucket. To create a bucket using an Object Bucket Class (OBC) resource that uses the bucket class defined in the step, apply the following YAML: <resource-name> The name you want to give to the resource. <my-bucket> The name you want to give to the bucket. <my-bucket-class> The bucket class created in the step. After the OBC is provisioned by the operator, a bucket is created in the MCG, and the operator creates a Secret and ConfigMap with the same name and in the same namespace as that of the OBC. 4.2.3. Adding an AWS S3 namespace bucket using the Multicloud Object Gateway CLI Prerequisites Openshift Container Platform with OpenShift Data Foundation operator installed. Access to the Multicloud Object Gateway (MCG), see Chapter 2, Accessing the Multicloud Object Gateway with your applications . Download the Multicloud Object Gateway (MCG) command-line interface binary from the customer portal and make it executable. Note Choose the correct product variant according to your architecture. Available platforms are Linux(x86_64), Windows, and Mac OS Procedure In the MCG command-line interface, create a NamespaceStore resource. A NamespaceStore represents an underlying storage to be used as a read or write target for the data in MCG namespace buckets. <namespacestore> The name of the NamespaceStore. <AWS ACCESS KEY> and <AWS SECRET ACCESS KEY> The AWS access key ID and secret access key you created for this purpose. <bucket-name> The existing AWS bucket name. This argument tells the MCG which bucket to use as a target bucket for its backing store, and subsequently, data storage and administration. Create a namespace bucket class that defines a namespace policy for the namespace buckets. The namespace policy can be either single or multi . To create a namespace bucket class with a namespace policy of type single : <resource-name> The name you want to give the resource. <my-bucket-class> A unique bucket class name. <resource> A single namespace-store that defines the read and write target of the namespace bucket. To create a namespace bucket class with a namespace policy of type multi : <resource-name> The name you want to give the resource. <my-bucket-class> A unique bucket class name. <write-resource> A single namespace-store that defines the write target of the namespace bucket. <read-resources>s A list of namespace-stores separated by commas that defines the read targets of the namespace bucket. Create a bucket using an Object Bucket Class (OBC) resource that uses the bucket class defined in the step. <bucket-name> A bucket name of your choice. <custom-bucket-class> The name of the bucket class created in the step. After the OBC is provisioned by the operator, a bucket is created in the MCG, and the operator creates a Secret and a ConfigMap with the same name and in the same namespace as that of the OBC. 4.2.4. Adding an IBM COS namespace bucket using the Multicloud Object Gateway CLI Prerequisites Openshift Container Platform with OpenShift Data Foundation operator installed. Access to the Multicloud Object Gateway (MCG), see Chapter 2, Accessing the Multicloud Object Gateway with your applications . 
Download the MCG command-line interface binary from the customer portal and make it executable. Note Choose either Linux(x86_64), Windows, or Mac OS. Procedure In the MCG command-line interface, create a NamespaceStore resource. A NamespaceStore represents an underlying storage to be used as a read or write target for the data in the MCG namespace buckets. <namespacestore> The name of the NamespaceStore. <IBM ACCESS KEY> , <IBM SECRET ACCESS KEY> , <IBM COS ENDPOINT> An IBM access key ID, secret access key, and the appropriate regional endpoint that corresponds to the location of the existing IBM bucket. <bucket-name> An existing IBM bucket name. This argument tells the MCG which bucket to use as a target bucket for its backing store, and subsequently, data storage and administration. Create a namespace bucket class that defines a namespace policy for the namespace buckets. The namespace policy requires a type of either single or multi . To create a namespace bucket class with a namespace policy of type single : <resource-name> The name you want to give the resource. <my-bucket-class> A unique bucket class name. <resource> A single NamespaceStore that defines the read and write target of the namespace bucket. To create a namespace bucket class with a namespace policy of type multi : <resource-name> The name you want to give the resource. <my-bucket-class> A unique bucket class name. <write-resource> A single NamespaceStore that defines the write target of the namespace bucket. <read-resources> A comma-separated list of NamespaceStores that defines the read targets of the namespace bucket. Create a bucket using an Object Bucket Class (OBC) resource that uses the bucket class defined in the earlier step. <bucket-name> A bucket name of your choice. <custom-bucket-class> The name of the bucket class created in the step. After the OBC is provisioned by the operator, a bucket is created in the MCG, and the operator creates a Secret and ConfigMap with the same name and in the same namespace as that of the OBC. 4.3. Adding a namespace bucket using the OpenShift Container Platform user interface You can add namespace buckets using the OpenShift Container Platform user interface. For information about namespace buckets, see Managing namespace buckets . Prerequisites Ensure that Openshift Container Platform with OpenShift Data Foundation operator is already installed. Access to the Multicloud Object Gateway (MCG). Procedure On the OpenShift Web Console, navigate to Storage -> Object Storage -> Namespace Store tab. Click Create namespace store to create a namespacestore resources to be used in the namespace bucket. Enter a namespacestore name. Choose a provider and region. Either select an existing secret, or click Switch to credentials to create a secret by entering a secret key and secret access key. Enter a target bucket. Click Create . On the Namespace Store tab, verify that the newly created namespacestore is in the Ready state. Repeat steps 2 and 3 until you have created all the desired amount of resources. Navigate to Bucket Class tab and click Create Bucket Class . Choose Namespace BucketClass type radio button. Enter a BucketClass name and click . Choose a Namespace Policy Type for your namespace bucket, and then click . If your namespace policy type is Single , you need to choose a read resource. If your namespace policy type is Multi , you need to choose read resources and a write resource. 
If your namespace policy type is Cache , you need to choose a Hub namespace store that defines the read and write target of the namespace bucket. Select one Read and Write NamespaceStore which defines the read and write targets of the namespace bucket and click . Review your new bucket class details, and then click Create Bucket Class . Navigate to Bucket Class tab and verify that your newly created resource is in the Ready phase. Navigate to Object Bucket Claims tab and click Create Object Bucket Claim . Enter ObjectBucketClaim Name for the namespace bucket. Select StorageClass as openshift-storage.noobaa.io . Select the BucketClass that you created earlier for your namespacestore from the list. By default, noobaa-default-bucket-class gets selected. Click Create . The namespace bucket is created along with Object Bucket Claim for your namespace. Navigate to Object Bucket Claims tab and verify that the Object Bucket Claim created is in Bound state. Navigate to Object Buckets tab and verify that your namespace bucket is present in the list and is in Bound state. 4.4. Sharing legacy application data with cloud native application using S3 protocol Many legacy applications use file systems to share data sets. You can access and share the legacy data in the file system by using the S3 operations. To share data, you need to do the following: Export the pre-existing file system datasets, that is, an RWX volume such as Ceph FileSystem (CephFS), or create new file system datasets using the S3 protocol. Access the file system datasets from both the file system and the S3 protocol. Configure S3 accounts and map them to the existing or new file system unique identifiers (UIDs) and group identifiers (GIDs). 4.4.1. Creating a NamespaceStore to use a file system Prerequisites Openshift Container Platform with OpenShift Data Foundation operator installed. Access to the Multicloud Object Gateway (MCG). Procedure Log into the OpenShift Web Console. Click Storage -> Object Storage . Click the NamespaceStore tab to create NamespaceStore resources to be used in the namespace bucket. Click Create namespacestore . Enter a name for the NamespaceStore. Choose Filesystem as the provider. Choose the Persistent volume claim. Enter a folder name. If the folder name exists, then that folder is used to create the NamespaceStore or else a folder with that name is created. Click Create . Verify the NamespaceStore is in the Ready state. 4.4.2. Creating accounts with NamespaceStore filesystem configuration You can either create a new account with NamespaceStore filesystem configuration or convert an existing normal account into a NamespaceStore filesystem account by editing the YAML. Note You cannot remove a NamespaceStore filesystem configuration from an account. Prerequisites Download the Multicloud Object Gateway (MCG) command-line interface binary from the customer portal and make it executable. Note Choose the correct product variant according to your architecture. Available platforms are Linux(x86_64), Windows, and Mac OS. Procedure Create a new account with NamespaceStore filesystem configuration using the MCG command-line interface. For example: allow_bucket_create Indicates whether the account is allowed to create new buckets. Supported values are true or false . Default value is true . allowed_buckets A comma-separated list of bucket names to which the user is allowed to have access and management rights.
default_resource The NamespaceStore resource on which the new buckets will be created when using the S3 CreateBucket operation. The NamespaceStore must be backed by an RWX (ReadWriteMany) persistent volume claim (PVC). full_permission Indicates whether the account should be allowed full permission or not. Supported values are true or false . Default value is false . new_buckets_path The filesystem path where directories corresponding to new buckets will be created. The path is inside the filesystem of NamespaceStore filesystem PVCs where new directories are created to act as the filesystem mapping of newly created object bucket classes. nsfs_account_config A mandatory field that indicates if the account is used for NamespaceStore filesystem. nsfs_only Indicates whether the account is used only for NamespaceStore filesystem or not. Supported values are true or false . Default value is false . If it is set to 'true', it limits you from accessing other types of buckets. uid The user ID of the filesystem to which the MCG account will be mapped and it is used to access and manage data on the filesystem gid The group ID of the filesystem to which the MCG account will be mapped and it is used to access and manage data on the filesystem The MCG system sends a response with the account configuration and its S3 credentials: You can list all the custom resource definition (CRD) based accounts by using the following command: If you are interested in a particular account, you can read its custom resource definition (CRD) directly by the account name: 4.4.3. Accessing legacy application data from the openshift-storage namespace When using the Multicloud Object Gateway (MCG) NamespaceStore filesystem (NSFS) feature, you need to have the Persistent Volume Claim (PVC) where the data resides in the openshift-storage namespace. In almost all cases, the data you need to access is not in the openshift-storage namespace, but in the namespace that the legacy application uses. In order to access data stored in another namespace, you need to create a PVC in the openshift-storage namespace that points to the same CephFS volume that the legacy application uses. Procedure Display the application namespace with scc : <application_namespace> Specify the name of the application namespace. For example: Navigate into the application namespace: For example: Ensure that a ReadWriteMany (RWX) PVC is mounted on the pod that you want to consume from the noobaa S3 endpoint using the MCG NSFS feature: Check the mount point of the Persistent Volume (PV) inside your pod. Get the volume name of the PV from the pod: <pod_name> Specify the name of the pod. For example: In this example, the name of the volume for the PVC is cephfs-write-workload-generator-no-cache-pv-claim . List all the mounts in the pod, and check for the mount point of the volume that you identified in the step: For example: Confirm the mount point of the RWX PV in your pod: <mount_path> Specify the path to the mount point that you identified in the step. For example: Ensure that the UID and SELinux labels are the same as the ones that the legacy namespace uses: For example: Get the information of the legacy application RWX PV that you want to make accessible from the openshift-storage namespace: <pv_name> Specify the name of the PV. For example: Ensure that the PVC from the legacy application is accessible from the openshift-storage namespace so that one or more noobaa-endpoint pods can access the PVC. 
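For example, assuming you know either the PV name or only the PVC name in the application namespace, a quick way to inspect the legacy PV might be the following (all names here are placeholders):

# Look up which PV backs the legacy application PVC:
$ oc get pvc <cephfs_pvc_name> -n <application_namespace> -o jsonpath='{.spec.volumeName}'
# Print the full PV definition, including spec.csi.volumeAttributes:
$ oc get pv <pv_name> -o yaml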
Find the values of the subvolumePath and volumeHandle from the volumeAttributes . You can get these values from the YAML description of the legacy application PV: For example: Use the subvolumePath and volumeHandle values that you identified in the step to create a new PV and PVC object in the openshift-storage namespace that points to the same CephFS volume as the legacy application PV: Example YAML file : 1 The storage capacity of the PV that you are creating in the openshift-storage namespace must be the same as the original PV. 2 The volume handle for the target PV that you create in openshift-storage needs to have a different handle than the original application PV, for example, add -clone at the end of the volume handle. 3 The storage capacity of the PVC that you are creating in the openshift-storage namespace must be the same as the original PVC. Create the PV and PVC in the openshift-storage namespace using the YAML file specified in the step: <YAML_file> Specify the name of the YAML file. For example: Ensure that the PVC is available in the openshift-storage namespace: Navigate into the openshift-storage project: Create the NSFS namespacestore: <nsfs_namespacestore> Specify the name of the NSFS namespacestore. <cephfs_pvc_name> Specify the name of the CephFS PVC in the openshift-storage namespace. For example: Ensure that the noobaa-endpoint pod restarts and that it successfully mounts the PVC at the NSFS namespacestore, for example, /nsfs/legacy-namespace mountpoint: <noobaa_endpoint_pod_name> Specify the name of the noobaa-endpoint pod. For example: Create a MCG user account: <user_account> Specify the name of the MCG user account. <gid_number> Specify the GID number. <uid_number> Specify the UID number. Important Use the same UID and GID as that of the legacy application. You can find it from the output. For example: Create a MCG bucket. Create a dedicated folder for S3 inside the NSFS share on the CephFS PV and PVC of the legacy application pod: For example: Create the MCG bucket using the nsfs/ path: For example: Check the SELinux labels of the folders residing in the PVCs in the legacy application and openshift-storage namespaces: For example: For example: In these examples, you can see that the SELinux labels are not the same which results in permission denied or access issues. Ensure that the legacy application and openshift-storage pods use the same SELinux labels on the files. You can do this in one of the following ways: Section 4.4.3.1, "Changing the default SELinux label on the legacy application project to match the one in the openshift-storage project" . Section 4.4.3.2, "Modifying the SELinux label only for the deployment config that has the pod which mounts the legacy application PVC" . Delete the NSFS namespacestore: Delete the MCG bucket: For example: Delete the MCG user account: For example: Delete the NSFS namespacestore: For example: Delete the PV and PVC: Important Before you delete the PV and PVC, ensure that the PV has a retain policy configured. <cephfs_pv_name> Specify the CephFS PV name of the legacy application. <cephfs_pvc_name> Specify the CephFS PVC name of the legacy application. For example: 4.4.3.1. 
Changing the default SELinux label on the legacy application project to match the one in the openshift-storage project Display the current openshift-storage namespace with sa.scc.mcs : Edit the legacy application namespace, and modify the sa.scc.mcs with the value from the sa.scc.mcs of the openshift-storage namespace: For example: For example: Restart the legacy application pod. A relabel of all the files takes place and the SELinux labels now match the openshift-storage deployment. 4.4.3.2. Modifying the SELinux label only for the deployment config that has the pod which mounts the legacy application PVC Create a new scc with the MustRunAs and seLinuxOptions options, with the Multi Category Security (MCS) that the openshift-storage project uses. Example YAML file: Create a service account for the deployment and add it to the newly created scc . Create a service account: <service_account_name> Specify the name of the service account. For example: Add the service account to the newly created scc : For example: Patch the legacy application deployment so that it uses the newly created service account. This allows you to specify the SELinux label in the deployment: For example: Edit the deployment to specify the security context to use at the SELinux label in the deployment configuration: Add the following lines: <security_context_value> You can find this value when you execute the command to create a dedicated folder for S3 inside the NSFS share, on the CephFS PV and PVC of the legacy application pod. For example: Ensure that the security context to be used at the SELinux label in the deployment configuration is specified correctly: For example: The legacy application is restarted and begins using the same SELinux labels as the openshift-storage namespace. Chapter 5. Securing Multicloud Object Gateway 5.1. Changing the default account credentials to ensure better security in the Multicloud Object Gateway Change and rotate your Multicloud Object Gateway (MCG) account credentials using the command-line interface to prevent issues with applications, and to ensure better account security. 5.1.1. Resetting the noobaa account password Prerequisites A running OpenShift Data Foundation cluster. Download the Multicloud Object Gateway (MCG) command-line interface binary from the customer portal and make it executable. Note Choose the correct product variant according to your architecture. Available platforms are Linux(x86_64), Windows, and Mac OS. Procedure To reset the noobaa account password, run the following command: Example: Example output: Important To access the admin account credentials, run the noobaa status command from the terminal: 5.1.2. Regenerating the S3 credentials for the accounts Prerequisites A running OpenShift Data Foundation cluster. Download the Multicloud Object Gateway (MCG) command-line interface binary from the customer portal and make it executable. Note Choose the correct product variant according to your architecture. Available platforms are Linux(x86_64), Windows, and Mac OS. Procedure Get the account name. For listing the accounts, run the following command: Example output: Alternatively, run the oc get noobaaaccount command from the terminal: Example output: To regenerate the noobaa account S3 credentials, run the following command: Once you run the noobaa account regenerate command, it will prompt a warning that says "This will invalidate all connections between S3 clients and NooBaa which are connected using the current credentials."
, and ask for confirmation: Example: Example output: On approving, it will regenerate the credentials and eventually print them: 5.1.3. Regenerating the S3 credentials for the OBC Prerequisites A running OpenShift Data Foundation cluster. Download the Multicloud Object Gateway (MCG) command-line interface binary from the customer portal and make it executable. Note Choose the correct product variant according to your architecture. Available platforms are Linux(x86_64), Windows, and Mac OS. Procedure To get the OBC name, run the following command: Example output: Alternatively, run the oc get obc command from the terminal: Example output: To regenerate the noobaa OBC S3 credentials, run the following command: Once you run the noobaa obc regenerate command it will prompt a warning that says "This will invalidate all connections between the S3 clients and noobaa which are connected using the current credentials." , and ask for confirmation: Example: Example output: On approving, it will regenerate the credentials and eventually print them: 5.2. Enabling secured mode deployment for Multicloud Object Gateway You can specify a range of IP addresses that should be allowed to reach the Multicloud Object Gateway (MCG) load balancer services to enable secure mode deployment. This helps to control the IP addresses that can access the MCG services. Note You can disable the MCG load balancer usage by setting the disableLoadBalancerService variable in the storagecluster custom resource definition (CRD) while deploying OpenShift Data Foundation using the command line interface. This helps to restrict MCG from creating any public resources for private clusters and to disable the MCG service EXTERNAL-IP . For more information, see the Red Hat Knowledgebase article Install Red Hat OpenShift Data Foundation 4.X in internal mode using command line interface . For information about disabling MCG load balancer service after deploying OpenShift Data Foundation, see Disabling Multicloud Object Gateway external service after deploying OpenShift Data Foundation . Prerequisites A running OpenShift Data Foundation cluster. In case of a bare metal deployment, ensure that the load balancer controller supports setting the loadBalancerSourceRanges attribute in the Kubernetes services. Procedure Edit the NooBaa custom resource (CR) to specify the range of IP addresses that can access the MCG services after deploying OpenShift Data Foundation. noobaa The NooBaa CR type that controls the NooBaa system deployment. noobaa The name of the NooBaa CR. For example: loadBalancerSourceSubnets A new field that can be added under spec in the NooBaa CR to specify the IP addresses that should have access to the NooBaa services. In this example, all the IP addresses that are in the subnet 10.0.0.0/16 or 192.168.10.0/32 will be able to access MCG S3 and security token service (STS) while the other IP addresses are not allowed to access. Verification steps To verify if the specified IP addresses are set, in the OpenShift Web Console, run the following command and check if the output matches with the IP addresses provided to MCG: Chapter 6. Mirroring data for hybrid and Multicloud buckets You can use the simplified process of the Multicloud Object Gateway (MCG) to span data across cloud providers and clusters. Before you create a bucket class that reflects the data management policy and mirroring, you must add a backing storage that can be used by the MCG. 
For information, see Chapter 3, Adding storage resources for hybrid or Multicloud . You can set up data mirroring by using the OpenShift UI, YAML or MCG command-line interface. See the following sections: Section 6.1, "Creating bucket classes to mirror data using the MCG command-line interface" Section 6.2, "Creating bucket classes to mirror data using a YAML" 6.1. Creating bucket classes to mirror data using the MCG command-line interface Prerequisites Ensure that you have downloaded the Multicloud Object Gateway (MCG) command-line interface. Procedure From the Multicloud Object Gateway (MCG) command-line interface, run the following command to create a bucket class with a mirroring policy: Set the newly created bucket class to a new bucket claim to generate a new bucket that will be mirrored between two locations: 6.2. Creating bucket classes to mirror data using a YAML Apply the following YAML. This YAML is a hybrid example that mirrors data between local Ceph storage and AWS: Add the following lines to your standard Object Bucket Claim (OBC): For more information about OBCs, see Chapter 9, Object Bucket Claim . Chapter 7. Bucket policies in the Multicloud Object Gateway OpenShift Data Foundation supports AWS S3 bucket policies. Bucket policies allow you to grant users access permissions for buckets and the objects in them. 7.1. Introduction to bucket policies Bucket policies are an access policy option available for you to grant permission to your AWS S3 buckets and objects. Bucket policies use JSON-based access policy language. For more information about access policy language, see AWS Access Policy Language Overview . 7.2. Using bucket policies in Multicloud Object Gateway Prerequisites A running OpenShift Data Foundation Platform. Access to the Multicloud Object Gateway (MCG), see Chapter 2, Accessing the Multicloud Object Gateway with your applications A valid Multicloud Object Gateway user account. See Creating a user in the Multicloud Object Gateway for instructions to create a user account. Procedure To use bucket policies in the MCG: Create the bucket policy in JSON format. For example: Replace [email protected] with a valid Multicloud Object Gateway user account. Using an AWS S3 client, use the put-bucket-policy command to apply the bucket policy to your S3 bucket: Replace ENDPOINT with the S3 endpoint. Replace MyBucket with the bucket to set the policy on. Replace BucketPolicy with the bucket policy JSON file. Add --no-verify-ssl if you are using the default self-signed certificates. For example: For more information on the put-bucket-policy command, see the AWS CLI Command Reference for put-bucket-policy . Note The principal element specifies the user that is allowed or denied access to a resource, such as a bucket. Currently, only NooBaa accounts can be used as principals. In the case of object bucket claims, NooBaa automatically creates an account obc-account.<generated bucket name>@noobaa.io . Note Bucket policy conditions are not supported. Additional resources There are many available elements for bucket policies with regard to access permissions. For details on these elements and examples of how they can be used to control the access permissions, see AWS Access Policy Language Overview . For more examples of bucket policies, see AWS Bucket Policy Examples . OpenShift Data Foundation version 4.16 introduces the bucket policy elements NotPrincipal , NotAction , and NotResource . For more information on these elements, see IAM JSON policy elements reference . 7.3.
Creating a user in the Multicloud Object Gateway Prerequisites A running OpenShift Data Foundation Platform. Download the Multicloud Object Gateway (MCG) command-line interface binary from the customer portal and make it executable. Note Choose the correct product variant according to your architecture. Available platforms are Linux(x86_64), Windows, and Mac OS. Procedure Execute the following command to create an MCG user account: <noobaa-account-name> Specify the name of the new MCG user account. --allow_bucket_create Allows the user to create new buckets. --allowed_buckets Sets the user's allowed bucket list (use commas or multiple flags). --default_resource Sets the default resource. The new buckets are created on this default resource (including the future ones). --full_permission Allows this account to access all existing and future buckets. Important You need to provide permission to access at least one bucket or full permission to access all the buckets. Chapter 8. Multicloud Object Gateway bucket replication Data replication from one Multicloud Object Gateway (MCG) bucket to another MCG bucket provides higher resiliency and better collaboration options. These buckets can be either data buckets or namespace buckets backed by any supported storage solution (AWS S3, Azure, and so on). A replication policy is composed of a list of replication rules. Each rule defines the destination bucket, and can specify a filter based on an object key prefix. Configuring a complementing replication policy on the second bucket results in bidirectional replication. Prerequisites A running OpenShift Data Foundation Platform. Download the Multicloud Object Gateway (MCG) command-line interface binary from the customer portal and make it executable. Note Choose the correct product variant according to your architecture. Available platforms are Linux(x86_64), Windows, and Mac OS. To replicate a bucket, see Replicating a bucket to another bucket . To set a bucket class replication policy, see Setting a bucket class replication policy . 8.1. Replicating a bucket to another bucket You can set the bucket replication policy in two ways: Replicating a bucket to another bucket using the MCG command-line interface . Replicating a bucket to another bucket using a YAML . 8.1.1. Replicating a bucket to another bucket using the MCG command-line interface You can set a replication policy for a Multicloud Object Gateway (MCG) data bucket at the time of creation of an object bucket claim (OBC). You must define the replication policy parameter in a JSON file. Procedure From the MCG command-line interface, run the following command to create an OBC with a specific replication policy: <bucket-claim-name> Specify the name of the bucket claim. /path/to/json-file.json Is the path to a JSON file which defines the replication policy. Example JSON file: "prefix" Is optional. It is the prefix of the object keys that should be replicated, and you can even leave it empty, for example, {"prefix": ""} . For example: 8.1.2. Replicating a bucket to another bucket using a YAML You can set a replication policy for a Multicloud Object Gateway (MCG) data bucket at the time of creation of an object bucket claim (OBC) or you can edit the YAML later. You must provide the policy as a JSON-compliant string that adheres to the format shown in the following procedure. Procedure Apply the following YAML: <desired-bucket-claim> Specify the name of the bucket claim. <desired-namespace> Specify the namespace.
<desired-bucket-name> Specify the prefix of the bucket name. "rule_id" Specify the ID number of the rule, for example, {"rule_id": "rule-1"} . "destination_bucket" Specify the name of the destination bucket, for example, {"destination_bucket": "first.bucket"} . "prefix" Is optional. It is the prefix of the object keys that should be replicated, and you can even leave it empty, for example, {"prefix": ""} . Additional information For more information about OBCs, see Object Bucket Claim . 8.2. Setting a bucket class replication policy It is possible to set up a replication policy that automatically applies to all the buckets created under a certain bucket class. You can do this in two ways: Setting a bucket class replication policy using the MCG command-line interface . Setting a bucket class replication policy using a YAML . 8.2.1. Setting a bucket class replication policy using the MCG command-line interface You can set a replication policy for a Multicloud Object Gateway (MCG) data bucket at the time of creation of a bucket class. You must define the replication-policy parameter in a JSON file. You can set a bucket class replication policy for the Placement and Namespace bucket classes. Procedure From the MCG command-line interface, run the following command: <bucketclass-name> Specify the name of the bucket class. <backingstores> Specify the name of a backingstore. You can pass many backingstores separated by commas. /path/to/json-file.json Is the path to a JSON file which defines the replication policy. Example JSON file: "prefix" Is optional. The prefix of the object keys gets replicated. You can leave it empty, for example, {"prefix": ""} . For example: This example creates a placement bucket class with a specific replication policy defined in the JSON file. 8.2.2. Setting a bucket class replication policy using a YAML You can set a replication policy for a Multicloud Object Gateway (MCG) data bucket at the time of creation of a bucket class or you can edit the YAML later. You must provide the policy as a JSON-compliant string that adheres to the format shown in the following procedure. Procedure Apply the following YAML: This YAML is an example that creates a placement bucket class. Each Object bucket claim (OBC) object that is uploaded to the bucket is filtered based on the prefix and is replicated to first.bucket . <desired-app-label> Specify a label for the app. <desired-bucketclass-name> Specify the bucket class name. <desired-namespace> Specify the namespace in which the bucket class gets created. <backingstore> Specify the name of a backingstore. You can pass many backingstores. "rule_id" Specify the ID number of the rule, for example, {"rule_id": "rule-1"} . "destination_bucket" Specify the name of the destination bucket, for example, {"destination_bucket": "first.bucket"} . "prefix" Is optional. The prefix of the object keys gets replicated. You can leave it empty, for example, {"prefix": ""} . 8.3. Enabling log based bucket replication When creating a bucket replication policy, you can use logs so that recent data is replicated more quickly, while the default scan-based replication works on replicating the rest of the data. Important This feature requires setting up bucket logs on AWS or Azure. For more information about setting up AWS logs, see Enabling Amazon S3 server access logging . The AWS logs bucket needs to be created in the same region as the source NamespaceStore AWS bucket.
Note This feature is only supported in buckets that are backed by a NamespaceStore. Buckets backed by BackingStores cannot utilize log-based replication. 8.3.1. Enabling log based bucket replication for new namespace buckets using OpenShift Web Console in Amazon Web Service environment You can optimize replication by using the event logs of the Amazon Web Services (AWS) cloud environment. You can enable log based bucket replication for new namespace buckets using the web console during the creation of namespace buckets. Prerequisites Ensure that object logging is enabled in AWS. For more information, see the "Using the S3 console" section in Enabling Amazon S3 server access logging . Administrator access to OpenShift Web Console. Procedure In the OpenShift Web Console, navigate to Storage -> Object Storage -> Object Bucket Claims . Click Create ObjectBucketClaim . Enter the name of ObjectBucketName and select StorageClass and BucketClass. Select the Enable replication check box to enable replication. In the Replication policy section, select the Optimize replication using event logs checkbox. Enter the name of the bucket that will contain the logs under Event log Bucket . If the logs are not stored in the root of the bucket, provide the full path without s3:// Enter a prefix to replicate only the objects whose name begins with the given prefix. 8.3.2. Enabling log based bucket replication for existing namespace buckets using YAML You can enable log based bucket replication for the existing buckets that are created using the command line interface or by applying a YAML, and not the buckets that are created using AWS S3 commands. Procedure Edit the YAML of the bucket's OBC to enable log based bucket replication. Add the following under spec : Note It is also possible to add this to the YAML of an OBC before it is created. rule_id Specify an ID of your choice for identifying the rule destination_bucket Specify the name of the target MCG bucket that the objects are copied to (optional) {"filter": {"prefix": <>}} Specify a prefix string that you can set to filter the objects that are replicated log_replication_info Specify an object that contains data related to log-based replication optimization. {"logs_location": {"logs_bucket": <>}} is set to the location of the AWS S3 server access logs. 8.3.3. Enabling log based bucket replication in Microsoft Azure Prerequisites Refer to Microsoft Azure documentation and ensure that you have completed the following tasks in the Microsoft Azure portal: Ensure that you have created a new application and noted down the name, application (client) ID, and directory (tenant) ID. For information, see Register an application . Ensure that a new client secret is created and the application secret is noted down. Ensure that a new Log Analytics workspace is created and its name and workspace ID are noted down. For information, see Create a Log Analytics workspace . Ensure that the Reader role is assigned under Access control, members are selected, and the name of the application that you registered in the step is provided. For more information, see Assign Azure roles using the Azure portal . Ensure that a new storage account is created and the Access keys are noted down.
Ensure that a blob is selected in the Diagnostic settings screen of the Monitoring section of the storage account created. Also, ensure that only StorageWrite and StorageDelete are selected and, in the destination details, the Log Analytics workspace that you created earlier is added. For more information, see Diagnostic settings in Azure Monitor . Ensure that two new containers for object source and object destination are created. Administrator access to OpenShift Web Console. Procedure Create a secret with credentials to be used by the namespacestores . Create a NamespaceStore backed by a container created in Azure. For more information, see Adding a namespace bucket using the OpenShift Container Platform user interface . Create a new Namespace-Bucketclass and OBC that utilizes it. Check the object bucket name by looking in the YAML of the target OBC, or by listing all S3 buckets, for example, s3 ls . Use the following template to apply an Azure replication policy on your source OBC by adding the following in its YAML, under .spec : sync_deletion Specify a boolean value, true or false . destination_bucket Make sure to use the name of the object bucket, and not the claim. The name can be retrieved using the s3 ls command, or by looking for the value in an OBC's YAML. Verification steps Write objects to the source bucket. Wait until MCG replicates them. Delete the objects from the source bucket. Verify the objects were removed from the target bucket. 8.3.4. Enabling log-based bucket replication deletion Prerequisites Administrator access to OpenShift Web Console. AWS Server Access Logging configured for the desired bucket. Procedure In the OpenShift Web Console, navigate to Storage -> Object Storage -> Object Bucket Claims . Click Create new Object bucket claim . (Optional) In the Replication rules section, select the Sync deletion checkbox for each rule separately. Enter the name of the bucket that will contain the logs under Event log Bucket . If the logs are not stored in the root of the bucket, provide the full path without s3://. Enter a prefix to replicate only the objects whose name begins with the given prefix. 8.4. Enabling bucket logging for Multicloud Object Gateway [Technology preview] Bucket logging helps you to record the S3 operations that are performed against the Multicloud Object Gateway (MCG) bucket for compliance, auditing, and optimization purposes. Important Technology Preview features provide early access to upcoming product innovations, enabling you to test functionality and provide feedback during the development process. However, these features are not fully supported under Red Hat Service Level Agreements, may not be functionally complete, and are not intended for production use. As Red Hat considers making future iterations of Technology Preview features generally available, we will attempt to resolve any issues that customers experience when using these features. See Technology Preview Features Support Scope for more information. Prerequisites OpenShift Container Platform with OpenShift Data Foundation operator installed. Access to MCG. For information, see Accessing the Multicloud Object Gateway with your applications . Procedure Create a data bucket where you can upload the objects. Create a log bucket where you want to store the logs for bucket operations by using the following command: Configure bucket logging on the data bucket with the log bucket. Verify that bucket logging is set for the data bucket.
The S3 operations can take up to 24 hours to get recorded in the logs bucket. The following example shows the recorded logs and how to download them: Example (Optional) To disable bucket logging, use the following command: Chapter 9. Object Bucket Claim An Object Bucket Claim can be used to request an S3 compatible bucket backend for your workloads. You can create an Object Bucket Claim in three ways: Section 9.1, "Dynamic Object Bucket Claim" Section 9.2, "Creating an Object Bucket Claim using the command line interface" Section 9.3, "Creating an Object Bucket Claim using the OpenShift Web Console" An object bucket claim creates a new bucket and an application account in NooBaa with permissions to the bucket, including a new access key and secret access key. The application account is allowed to access only a single bucket and can't create new buckets by default. 9.1. Dynamic Object Bucket Claim Similar to Persistent Volumes, you can add the details of the Object Bucket claim (OBC) to your application's YAML, and get the object service endpoint, access key, and secret access key available in a configuration map and secret. It is easy to read this information dynamically into environment variables of your application. Note The Multicloud Object Gateway endpoints use self-signed certificates only if OpenShift uses self-signed certificates. Using signed certificates in OpenShift automatically replaces the Multicloud Object Gateway endpoints certificates with signed certificates. Get the certificate currently used by Multicloud Object Gateway by accessing the endpoint via the browser. See Accessing the Multicloud Object Gateway with your applications for more information. Procedure Add the following lines to your application YAML: These lines are the OBC itself. Replace <obc-name> with a unique OBC name. Replace <obc-bucket-name> with a unique bucket name for your OBC. To automate the use of the OBC, add more lines to the YAML file. For example: The example is the mapping between the bucket claim result, which is a configuration map with data and a secret with the credentials. This specific job claims the Object Bucket from NooBaa, which creates a bucket and an account. Replace all instances of <obc-name> with your OBC name. Replace <your application image> with your application image. Apply the updated YAML file: Replace <yaml.file> with the name of your YAML file. To view the new configuration map, run the following: Replace obc-name with the name of your OBC. You can expect the following environment variables in the output: BUCKET_HOST - Endpoint to use in the application. BUCKET_PORT - The port available for the application. The port is related to the BUCKET_HOST . For example, if the BUCKET_HOST is https://my.example.com , and the BUCKET_PORT is 443, the endpoint for the object service would be https://my.example.com:443 . BUCKET_NAME - Requested or generated bucket name. AWS_ACCESS_KEY_ID - Access key that is part of the credentials. AWS_SECRET_ACCESS_KEY - Secret access key that is part of the credentials. Important Retrieve the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY . The names are used so that they are compatible with the AWS S3 API. You need to specify the keys while performing S3 operations, especially when you read, write or list from the Multicloud Object Gateway (MCG) bucket. The keys are encoded in Base64. Decode the keys before using them. <obc_name> Specify the name of the object bucket claim. 9.2.
Creating an Object Bucket Claim using the command line interface When creating an Object Bucket Claim (OBC) using the command-line interface, you get a configuration map and a Secret that together contain all the information your application needs to use the object storage service. Prerequisites Download the Multicloud Object Gateway (MCG) command-line interface binary from the customer portal and make it executable. Note Choose the correct product variant according to your architecture. Available platforms are Linux(x86_64), Windows, and Mac OS. Procedure Use the command-line interface to generate the details of a new bucket and credentials. Run the following command: Replace <obc-name> with a unique OBC name, for example, myappobc . Additionally, you can use the --app-namespace option to specify the namespace where the OBC configuration map and secret will be created, for example, myapp-namespace . For example: The MCG command-line interface has created the necessary configuration and has informed OpenShift about the new OBC. Run the following command to view the OBC: For example: Run the following command to view the YAML file for the new OBC: For example: Inside your openshift-storage namespace, you can find the configuration map and the secret to use this OBC. The CM and the secret have the same name as the OBC. Run the following command to view the secret: For example: The secret gives you the S3 access credentials. Run the following command to view the configuration map: For example: The configuration map contains the S3 endpoint information for your application. 9.3. Creating an Object Bucket Claim using the OpenShift Web Console You can create an Object Bucket Claim (OBC) using the OpenShift Web Console. Prerequisites Administrative access to the OpenShift Web Console. In order for your applications to communicate with the OBC, you need to use the configmap and secret. For more information about this, see Section 9.1, "Dynamic Object Bucket Claim" . Procedure Log into the OpenShift Web Console. On the left navigation bar, click Storage -> Object Storage -> Object Bucket Claims -> Create Object Bucket Claim . Enter a name for your object bucket claim and select the appropriate storage class based on your deployment, internal or external, from the dropdown menu: Internal mode The following storage classes, which were created after deployment, are available for use: ocs-storagecluster-ceph-rgw uses the Ceph Object Gateway (RGW) openshift-storage.noobaa.io uses the Multicloud Object Gateway (MCG) External mode The following storage classes, which were created after deployment, are available for use: ocs-external-storagecluster-ceph-rgw uses the RGW openshift-storage.noobaa.io uses the MCG Note The RGW OBC storage class is only available with fresh installations of OpenShift Data Foundation version 4.5. It does not apply to clusters upgraded from previous OpenShift Data Foundation releases. Click Create . Once you create the OBC, you are redirected to its detail page. 9.4. Attaching an Object Bucket Claim to a deployment Once created, Object Bucket Claims (OBCs) can be attached to specific deployments. Prerequisites Administrative access to the OpenShift Web Console. Procedure On the left navigation bar, click Storage -> Object Storage -> Object Bucket Claims . Click the Action menu (...) next to the OBC you created. From the drop-down menu, select Attach to Deployment . Select the desired deployment from the Deployment Name list, then click Attach . 9.5.
Viewing object buckets using the OpenShift Web Console You can view the details of object buckets created for Object Bucket Claims (OBCs) using the OpenShift Web Console. Prerequisites Administrative access to the OpenShift Web Console. Procedure Log into the OpenShift Web Console. On the left navigation bar, click Storage -> Object Storage -> Object Buckets . Optional: You can also navigate to the details page of a specific OBC, and click the Resource link to view the object buckets for that OBC. Select the object bucket of which you want to see the details. Once selected, you are navigated to the Object Bucket Details page. 9.6. Deleting Object Bucket Claims Prerequisites Administrative access to the OpenShift Web Console. Procedure On the left navigation bar, click Storage -> Object Storage -> Object Bucket Claims . Click the Action menu (...) next to the Object Bucket Claim (OBC) you want to delete. Select Delete Object Bucket Claim . Click Delete . Chapter 10. Caching policy for object buckets A cache bucket is a namespace bucket with a hub target and a cache target. The hub target is an S3 compatible large object storage bucket. The cache bucket is the local Multicloud Object Gateway (MCG) bucket. You can create a cache bucket that caches an AWS bucket or an IBM COS bucket. AWS S3 IBM COS 10.1. Creating an AWS cache bucket Prerequisites Download the Multicloud Object Gateway (MCG) command-line interface binary from the customer portal and make it executable. Note Choose the correct product variant according to your architecture. Available platforms are Linux(x86_64), Windows, and Mac OS. Procedure Create a NamespaceStore resource. A NamespaceStore represents an underlying storage to be used as a read or write target for the data in the MCG namespace buckets. From the MCG command-line interface, run the following command: Replace <namespacestore> with the name of the namespacestore. Replace <AWS ACCESS KEY> and <AWS SECRET ACCESS KEY> with an AWS access key ID and secret access key you created for this purpose. Replace <bucket-name> with an existing AWS bucket name. This argument tells the MCG which bucket to use as a target bucket for its backing store, and subsequently, data storage and administration. You can also add storage resources by applying a YAML. First, create a secret with credentials: You must supply and encode your own AWS access key ID and secret access key using Base64, and use the results in place of <AWS ACCESS KEY ID ENCODED IN BASE64> and <AWS SECRET ACCESS KEY ENCODED IN BASE64> . Replace <namespacestore-secret-name> with a unique name. Then apply the following YAML: Replace <namespacestore> with a unique name. Replace <namespacestore-secret-name> with the secret created in the previous step. Replace <namespace-secret> with the namespace used to create the secret in the previous step. Replace <target-bucket> with the AWS S3 bucket you created for the namespacestore. Run the following command to create a bucket class: Replace <my-cache-bucket-class> with a unique bucket class name. Replace <backing-store> with the relevant backing store. You can list one or more backingstores separated by commas in this field. Replace <namespacestore> with the namespacestore created in the previous step. Run the following command to create a bucket using an Object Bucket Claim (OBC) resource that uses the bucket class defined in step 2. Replace <my-bucket-claim> with a unique name. Replace <custom-bucket-class> with the name of the bucket class created in step 2. 10.2.
Creating an IBM COS cache bucket Prerequisites Download the Multicloud Object Gateway (MCG) command-line interface binary from the customer portal and make it executable. Note Choose the correct product variant according to your architecture. Available platforms are Linux(x86_64), Windows, and Mac OS. Procedure Create a NamespaceStore resource. A NamespaceStore represents an underlying storage to be used as a read or write target for the data in the MCG namespace buckets. From the MCG command-line interface, run the following command: Replace <namespacestore> with the name of the NamespaceStore. Replace <IBM ACCESS KEY> , <IBM SECRET ACCESS KEY> , <IBM COS ENDPOINT> with an IBM access key ID, secret access key and the appropriate regional endpoint that corresponds to the location of the existing IBM bucket. Replace <bucket-name> with an existing IBM bucket name. This argument tells the MCG which bucket to use as a target bucket for its backing store, and subsequently, data storage and administration. You can also add storage resources by applying a YAML. First, create a secret with the credentials: You must supply and encode your own IBM COS access key ID and secret access key using Base64, and use the results in place of <IBM COS ACCESS KEY ID ENCODED IN BASE64> and <IBM COS SECRET ACCESS KEY ENCODED IN BASE64> . Replace <namespacestore-secret-name> with a unique name. Then apply the following YAML: Replace <namespacestore> with a unique name. Replace <IBM COS ENDPOINT> with the appropriate IBM COS endpoint. Replace <backingstore-secret-name> with the secret created in the previous step. Replace <namespace-secret> with the namespace used to create the secret in the previous step. Replace <target-bucket> with the IBM COS bucket you created for the namespacestore. Run the following command to create a bucket class: Replace <my-bucket-class> with a unique bucket class name. Replace <backing-store> with the relevant backing store. You can list one or more backingstores separated by commas in this field. Replace <namespacestore> with the namespacestore created in the previous step. Run the following command to create a bucket using an Object Bucket Claim resource that uses the bucket class defined in step 2. Replace <my-bucket-claim> with a unique name. Replace <custom-bucket-class> with the name of the bucket class created in step 2. Chapter 11. Lifecycle bucket configuration in Multicloud Object Gateway Multicloud Object Gateway (MCG) lifecycle provides a way to reduce storage costs due to accumulated data objects. Deletion of expired objects is a simplified way that enables handling of unused data. Data expiration is a part of Amazon Web Services (AWS) lifecycle management and sets an expiration date for automatic deletion. The minimal time resolution of the lifecycle expiration is one day. For more information, see Expiring objects . The AWS S3 API is used to configure the bucket lifecycle in MCG (see the example sketch below). For information about the data bucket APIs and their support level, see Support of Multicloud Object Gateway data bucket APIs . There are a few limitations with the expiration rule API for MCG in comparison with AWS: ExpiredObjectDeleteMarker is accepted but it is not processed. There is no option to define specific non-current version expiration conditions. Chapter 12. Scaling Multicloud Object Gateway performance The Multicloud Object Gateway (MCG) performance may vary from one environment to another. In some cases, specific applications require faster performance which can be easily addressed by scaling S3 endpoints.
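As a reference for the lifecycle configuration described in Chapter 11, an expiration rule can be applied with the standard AWS S3 API against the MCG S3 endpoint. This is a minimal sketch only; the endpoint URL, credentials, bucket name, rule ID, and prefix are placeholders, and the rule must stay within the supported subset documented for MCG.
# Placeholders: adjust the endpoint, credentials, and bucket name for your environment.
AWS_ACCESS_KEY_ID=<AWS_ACCESS_KEY_ID> AWS_SECRET_ACCESS_KEY=<AWS_SECRET_ACCESS_KEY> \
aws --endpoint <ENDPOINT> --no-verify-ssl s3api put-bucket-lifecycle-configuration \
  --bucket <bucket-name> \
  --lifecycle-configuration '{"Rules": [{"ID": "expire-tmp", "Status": "Enabled", "Filter": {"Prefix": "tmp/"}, "Expiration": {"Days": 30}}]}'
# Review the configured lifecycle rules.
AWS_ACCESS_KEY_ID=<AWS_ACCESS_KEY_ID> AWS_SECRET_ACCESS_KEY=<AWS_SECRET_ACCESS_KEY> \
aws --endpoint <ENDPOINT> --no-verify-ssl s3api get-bucket-lifecycle-configuration --bucket <bucket-name>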
The MCG resource pool is a group of NooBaa daemon containers that provide two types of services enabled by default: Storage service S3 endpoint service S3 endpoint service The S3 endpoint is a service that every Multicloud Object Gateway (MCG) provides by default that handles the heavy lifting of data digestion in the MCG. The endpoint service handles the inline data chunking, deduplication, compression, and encryption, and it accepts data placement instructions from the MCG. 12.1. Automatic scaling of MultiCloud Object Gateway endpoints The number of MultiCloud Object Gateway (MCG) endpoints scales automatically when the load on the MCG S3 service increases or decreases. OpenShift Data Foundation clusters are deployed with one active MCG endpoint. Each MCG endpoint pod is configured by default with 1 CPU and 2Gi memory request, with limits matching the request. When the CPU load on the endpoint crosses over an 80% usage threshold for a consistent period of time, a second endpoint is deployed lowering the load on the first endpoint. When the average CPU load on both endpoints falls below the 80% threshold for a consistent period of time, one of the endpoints is deleted. This feature improves performance and serviceability of the MCG. You can scale the Horizontal Pod Autoscaler (HPA) for noobaa-endpoint using the following oc patch command, for example: The example above sets the minCount to 3 and the maxCount to 10 . 12.2. Increasing CPU and memory for PV pool resources MCG default configuration supports low resource consumption. However, when you need to increase CPU and memory to accommodate specific workloads and to increase MCG performance for the workloads, you can configure the required values for CPU and memory in the OpenShift Web Console. Procedure In the OpenShift Web Console, navigate to Storage -> Object Storage -> Backing Store . Select the relevant backing store and click on YAML. Scroll down until you find spec: and update pvPool with CPU and memory. Add a new property of limits and then add cpu and memory. Example reference: Click Save . Verification steps To verify, you can check the resource values of the PV pool pods. Chapter 13. Accessing the RADOS Object Gateway S3 endpoint Users can access the RADOS Object Gateway (RGW) endpoint directly. In previous versions of Red Hat OpenShift Data Foundation, the RGW service needed to be manually exposed to create the RGW public route. As of OpenShift Data Foundation version 4.7, the RGW route is created by default and is named rook-ceph-rgw-ocs-storagecluster-cephobjectstore . Chapter 14. Using TLS certificates for applications accessing RGW Most S3 applications require a TLS certificate in forms such as an option included in the Deployment configuration file, passed as a file in the request, or stored in /etc/pki paths. TLS certificates for RADOS Object Gateway (RGW) are stored as a Kubernetes secret and you need to fetch the details from the secret. Prerequisites A running OpenShift Data Foundation cluster. Procedure For internal RGW server Get the TLS certificate and key from the kubernetes secret: <secret_name> The default kubernetes secret name is <objectstore_name>-cos-ceph-rgw-tls-cert . Specify the name of the object store. For external RGW server Get the TLS certificate from the kubernetes secret: <secret_name> The default kubernetes secret name is ceph-rgw-tls-cert and it is an opaque type of secret. The key value for storing the TLS certificates is cert . 14.1.
Accessing External RGW server in OpenShift Data Foundation Accessing External RGW server using Object Bucket Claims The S3 credentials, such as the access key and secret key, are stored in the secret generated by the Object Bucket Claim (OBC) creation, and you can fetch them by using the following commands: Similarly, you can fetch the endpoint details from the configmap of the OBC: Accessing External RGW server using the Ceph Object Store User CR You can fetch the S3 credentials and endpoint details from the secret generated as part of the Ceph Object Store User CR: Important For both the access mechanisms, you can either request new certificates from the administrator or reuse the certificates from the Kubernetes secret, ceph-rgw-tls-cert .
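The exact commands for fetching these values are not included in this extract. The following is a minimal sketch only, assuming the OBC is named <obc_name> in <app_namespace>, the data keys match the names documented earlier (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, BUCKET_HOST, BUCKET_PORT), and the external RGW TLS certificate is stored under the cert key of the ceph-rgw-tls-cert secret in the openshift-storage namespace.
# Fetch the S3 credentials from the secret created by the OBC (names are placeholders).
oc get secret <obc_name> -n <app_namespace> -o jsonpath='{.data.AWS_ACCESS_KEY_ID}' | base64 -d
oc get secret <obc_name> -n <app_namespace> -o jsonpath='{.data.AWS_SECRET_ACCESS_KEY}' | base64 -d
# Fetch the endpoint details from the configmap created by the OBC.
oc get configmap <obc_name> -n <app_namespace> -o jsonpath='{.data.BUCKET_HOST}:{.data.BUCKET_PORT}{"\n"}'
# Reuse the external RGW TLS certificate stored in the ceph-rgw-tls-cert secret.
oc get secret ceph-rgw-tls-cert -n openshift-storage -o jsonpath='{.data.cert}' | base64 -d > rgw.crt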
[ "oc describe noobaa -n openshift-storage", "Name: noobaa Namespace: openshift-storage Labels: <none> Annotations: <none> API Version: noobaa.io/v1alpha1 Kind: NooBaa Metadata: Creation Timestamp: 2019-07-29T16:22:06Z Generation: 1 Resource Version: 6718822 Self Link: /apis/noobaa.io/v1alpha1/namespaces/openshift-storage/noobaas/noobaa UID: 019cfb4a-b21d-11e9-9a02-06c8de012f9e Spec: Status: Accounts: Admin: Secret Ref: Name: noobaa-admin Namespace: openshift-storage Actual Image: noobaa/noobaa-core:4.0 Observed Generation: 1 Phase: Ready Readme: Welcome to NooBaa! ----------------- Welcome to NooBaa! ----------------- NooBaa Core Version: NooBaa Operator Version: Lets get started: 1. Connect to Management console: Read your mgmt console login information (email & password) from secret: \"noobaa-admin\". kubectl get secret noobaa-admin -n openshift-storage -o json | jq '.data|map_values(@base64d)' Open the management console service - take External IP/DNS or Node Port or use port forwarding: kubectl port-forward -n openshift-storage service/noobaa-mgmt 11443:443 & open https://localhost:11443 2. Test S3 client: kubectl port-forward -n openshift-storage service/s3 10443:443 & 1 NOOBAA_ACCESS_KEY=USD(kubectl get secret noobaa-admin -n openshift-storage -o json | jq -r '.data.AWS_ACCESS_KEY_ID|@base64d') 2 NOOBAA_SECRET_KEY=USD(kubectl get secret noobaa-admin -n openshift-storage -o json | jq -r '.data.AWS_SECRET_ACCESS_KEY|@base64d') alias s3='AWS_ACCESS_KEY_ID=USDNOOBAA_ACCESS_KEY AWS_SECRET_ACCESS_KEY=USDNOOBAA_SECRET_KEY aws --endpoint https://localhost:10443 --no-verify-ssl s3' s3 ls Services: Service Mgmt: External DNS: https://noobaa-mgmt-openshift-storage.apps.mycluster-cluster.qe.rh-ocs.com https://a3406079515be11eaa3b70683061451e-1194613580.us-east-2.elb.amazonaws.com:443 Internal DNS: https://noobaa-mgmt.openshift-storage.svc:443 Internal IP: https://172.30.235.12:443 Node Ports: https://10.0.142.103:31385 Pod Ports: https://10.131.0.19:8443 serviceS3: External DNS: 3 https://s3-openshift-storage.apps.mycluster-cluster.qe.rh-ocs.com https://a340f4e1315be11eaa3b70683061451e-943168195.us-east-2.elb.amazonaws.com:443 Internal DNS: https://s3.openshift-storage.svc:443 Internal IP: https://172.30.86.41:443 Node Ports: https://10.0.142.103:31011 Pod Ports: https://10.131.0.19:6443", "noobaa status -n openshift-storage", "INFO[0000] Namespace: openshift-storage INFO[0000] INFO[0000] CRD Status: INFO[0003] ✅ Exists: CustomResourceDefinition \"noobaas.noobaa.io\" INFO[0003] ✅ Exists: CustomResourceDefinition \"backingstores.noobaa.io\" INFO[0003] ✅ Exists: CustomResourceDefinition \"bucketclasses.noobaa.io\" INFO[0004] ✅ Exists: CustomResourceDefinition \"objectbucketclaims.objectbucket.io\" INFO[0004] ✅ Exists: CustomResourceDefinition \"objectbuckets.objectbucket.io\" INFO[0004] INFO[0004] Operator Status: INFO[0004] ✅ Exists: Namespace \"openshift-storage\" INFO[0004] ✅ Exists: ServiceAccount \"noobaa\" INFO[0005] ✅ Exists: Role \"ocs-operator.v0.0.271-6g45f\" INFO[0005] ✅ Exists: RoleBinding \"ocs-operator.v0.0.271-6g45f-noobaa-f9vpj\" INFO[0006] ✅ Exists: ClusterRole \"ocs-operator.v0.0.271-fjhgh\" INFO[0006] ✅ Exists: ClusterRoleBinding \"ocs-operator.v0.0.271-fjhgh-noobaa-pdxn5\" INFO[0006] ✅ Exists: Deployment \"noobaa-operator\" INFO[0006] INFO[0006] System Status: INFO[0007] ✅ Exists: NooBaa \"noobaa\" INFO[0007] ✅ Exists: StatefulSet \"noobaa-core\" INFO[0007] ✅ Exists: Service \"noobaa-mgmt\" INFO[0008] ✅ Exists: Service \"s3\" INFO[0008] ✅ Exists: Secret \"noobaa-server\" 
INFO[0008] ✅ Exists: Secret \"noobaa-operator\" INFO[0008] ✅ Exists: Secret \"noobaa-admin\" INFO[0009] ✅ Exists: StorageClass \"openshift-storage.noobaa.io\" INFO[0009] ✅ Exists: BucketClass \"noobaa-default-bucket-class\" INFO[0009] ✅ (Optional) Exists: BackingStore \"noobaa-default-backing-store\" INFO[0010] ✅ (Optional) Exists: CredentialsRequest \"noobaa-cloud-creds\" INFO[0010] ✅ (Optional) Exists: PrometheusRule \"noobaa-prometheus-rules\" INFO[0010] ✅ (Optional) Exists: ServiceMonitor \"noobaa-service-monitor\" INFO[0011] ✅ (Optional) Exists: Route \"noobaa-mgmt\" INFO[0011] ✅ (Optional) Exists: Route \"s3\" INFO[0011] ✅ Exists: PersistentVolumeClaim \"db-noobaa-core-0\" INFO[0011] ✅ System Phase is \"Ready\" INFO[0011] ✅ Exists: \"noobaa-admin\" #------------------# #- Mgmt Addresses -# #------------------# ExternalDNS : [https://noobaa-mgmt-openshift-storage.apps.mycluster-cluster.qe.rh-ocs.com https://a3406079515be11eaa3b70683061451e-1194613580.us-east-2.elb.amazonaws.com:443] ExternalIP : [] NodePorts : [https://10.0.142.103:31385] InternalDNS : [https://noobaa-mgmt.openshift-storage.svc:443] InternalIP : [https://172.30.235.12:443] PodPorts : [https://10.131.0.19:8443] #--------------------# #- Mgmt Credentials -# #--------------------# email : [email protected] password : HKLbH1rSuVU0I/souIkSiA== #----------------# #- S3 Addresses -# #----------------# 1 ExternalDNS : [https://s3-openshift-storage.apps.mycluster-cluster.qe.rh-ocs.com https://a340f4e1315be11eaa3b70683061451e-943168195.us-east-2.elb.amazonaws.com:443] ExternalIP : [] NodePorts : [https://10.0.142.103:31011] InternalDNS : [https://s3.openshift-storage.svc:443] InternalIP : [https://172.30.86.41:443] PodPorts : [https://10.131.0.19:6443] #------------------# #- S3 Credentials -# #------------------# 2 AWS_ACCESS_KEY_ID : jVmAsu9FsvRHYmfjTiHV 3 AWS_SECRET_ACCESS_KEY : E//420VNedJfATvVSmDz6FMtsSAzuBv6z180PT5c #------------------# #- Backing Stores -# #------------------# NAME TYPE TARGET-BUCKET PHASE AGE noobaa-default-backing-store aws-s3 noobaa-backing-store-15dc896d-7fe0-4bed-9349-5942211b93c9 Ready 141h35m32s #------------------# #- Bucket Classes -# #------------------# NAME PLACEMENT PHASE AGE noobaa-default-bucket-class {Tiers:[{Placement: BackingStores:[noobaa-default-backing-store]}]} Ready 141h35m33s #-----------------# #- Bucket Claims -# #-----------------# No OBC's found.", "AWS_ACCESS_KEY_ID=<AWS_ACCESS_KEY_ID> AWS_SECRET_ACCESS_KEY=<AWS_SECRET_ACCESS_KEY> aws --endpoint <ENDPOINT> --no-verify-ssl s3 ls", "oc get backingstore NAME TYPE PHASE AGE noobaa-default-backing-store pv-pool Creating 102s", "oc patch noobaa/noobaa --type json --patch='[{\"op\":\"add\",\"path\":\"/spec/manualDefaultBackingStore\",\"value\":true}]'", "noobaa backingstore create pv-pool _NEW-DEFAULT-BACKING-STORE_ --num-volumes 1 --pv-size-gb 16", "noobaa account update [email protected] --new_default_resource=_NEW-DEFAULT-BACKING-STORE_", "oc patch Bucketclass noobaa-default-bucket-class -n openshift-storage --type=json --patch='[{\"op\": \"replace\", \"path\": \"/spec/placementPolicy/tiers/0/backingStores/0\", \"value\": \"NEW-DEFAULT-BACKING-STORE\"}]'", "oc delete backingstore noobaa-default-backing-store -n openshift-storage | oc patch -n openshift-storage backingstore/noobaa-default-backing-store --type json --patch='[ { \"op\": \"remove\", \"path\": \"/metadata/finalizers\" } ]'", "noobaa backingstore create aws-s3 <backingstore_name> --access-key=<AWS ACCESS KEY> --secret-key=<AWS SECRET ACCESS KEY> --target-bucket 
<bucket-name> -n openshift-storage", "INFO[0001] ✅ Exists: NooBaa \"noobaa\" INFO[0002] ✅ Created: BackingStore \"aws-resource\" INFO[0002] ✅ Created: Secret \"backing-store-secret-aws-resource\"", "apiVersion: v1 kind: Secret metadata: name: <backingstore-secret-name> namespace: openshift-storage type: Opaque data: AWS_ACCESS_KEY_ID: <AWS ACCESS KEY ID ENCODED IN BASE64> AWS_SECRET_ACCESS_KEY: <AWS SECRET ACCESS KEY ENCODED IN BASE64>", "apiVersion: noobaa.io/v1alpha1 kind: BackingStore metadata: finalizers: - noobaa.io/finalizer labels: app: noobaa name: bs namespace: openshift-storage spec: awsS3: secret: name: <backingstore-secret-name> namespace: openshift-storage targetBucket: <bucket-name> type: aws-s3", "{ \"Version\": \"2012-10-17\", \"Statement\": [ { \"Effect\": \"Allow\", \"Principal\": { \"Federated\": \"arn:aws:iam::123456789123:oidc-provider/mybucket-oidc.s3.us-east-2.amazonaws.com\" }, \"Action\": \"sts:AssumeRoleWithWebIdentity\", \"Condition\": { \"StringEquals\": { \"mybucket-oidc.s3.us-east-2.amazonaws.com:sub\": [ \"system:serviceaccount:openshift-storage:noobaa\", \"system:serviceaccount:openshift-storage:noobaa-endpoint\" ] } } } ] }", "#!/bin/bash set -x This is a sample script to help you deploy MCG on AWS STS cluster. This script shows how to create role-policy and then create the role in AWS. For more information see: https://docs.openshift.com/rosa/authentication/assuming-an-aws-iam-role-for-a-service-account.html WARNING: This is a sample script. You need to adjust the variables based on your requirement. Variables : user variables - REPLACE these variables with your values: ROLE_NAME=\"<role-name>\" # role name that you pick in your AWS account NAMESPACE=\"<namespace>\" # namespace name where MCG is running. For OpenShift Data Foundation, it is openshift-storage. MCG variables SERVICE_ACCOUNT_NAME_1=\"<service-account-name-1>\" # The service account name of statefulset core and deployment operator (MCG operator) SERVICE_ACCOUNT_NAME_2=\"<service-account-name-2>\" # The service account name of deployment endpoint (MCG endpoint) AWS variables Make sure these values are not empty (AWS_ACCOUNT_ID, OIDC_PROVIDER) AWS_ACCOUNT_ID is your AWS account number AWS_ACCOUNT_ID=USD(aws sts get-caller-identity --query \"Account\" --output text) If you want to create the role before using the cluster, replace this field too. The OIDC provider is in the structure: 1) <OIDC-bucket>.s3.<aws-region>.amazonaws.com. 
for OIDC bucket configurations are in an S3 public bucket 2) `<characters>.cloudfront.net` for OIDC bucket configurations in an S3 private bucket with a public CloudFront distribution URL OIDC_PROVIDER=USD(oc get authentication cluster -ojson | jq -r .spec.serviceAccountIssuer | sed -e \"s/^https:\\/\\///\") the permission (S3 full access) POLICY_ARN_STRINGS=\"arn:aws:iam::aws:policy/AmazonS3FullAccess\" Creating the role (with AWS command line interface) read -r -d '' TRUST_RELATIONSHIP <<EOF { \"Version\": \"2012-10-17\", \"Statement\": [ { \"Effect\": \"Allow\", \"Principal\": { \"Federated\": \"arn:aws:iam::USD{AWS_ACCOUNT_ID}:oidc-provider/USD{OIDC_PROVIDER}\" }, \"Action\": \"sts:AssumeRoleWithWebIdentity\", \"Condition\": { \"StringEquals\": { \"USD{OIDC_PROVIDER}:sub\": [ \"system:serviceaccount:USD{NAMESPACE}:USD{SERVICE_ACCOUNT_NAME_1}\", \"system:serviceaccount:USD{NAMESPACE}:USD{SERVICE_ACCOUNT_NAME_2}\" ] } } } ] } EOF echo \"USD{TRUST_RELATIONSHIP}\" > trust.json aws iam create-role --role-name \"USDROLE_NAME\" --assume-role-policy-document file://trust.json --description \"role for demo\" while IFS= read -r POLICY_ARN; do echo -n \"Attaching USDPOLICY_ARN ... \" aws iam attach-role-policy --role-name \"USDROLE_NAME\" --policy-arn \"USD{POLICY_ARN}\" echo \"ok.\" done <<< \"USDPOLICY_ARN_STRINGS\"", "noobaa backingstore create aws-sts-s3 <backingstore-name> --aws-sts-arn=<aws-sts-role-arn> --region=<region> --target-bucket=<target-bucket>", "noobaa backingstore create ibm-cos <backingstore_name> --access-key=<IBM ACCESS KEY> --secret-key=<IBM SECRET ACCESS KEY> --endpoint=<IBM COS ENDPOINT> --target-bucket <bucket-name> -n openshift-storage", "INFO[0001] ✅ Exists: NooBaa \"noobaa\" INFO[0002] ✅ Created: BackingStore \"ibm-resource\" INFO[0002] ✅ Created: Secret \"backing-store-secret-ibm-resource\"", "apiVersion: v1 kind: Secret metadata: name: <backingstore-secret-name> namespace: openshift-storage type: Opaque data: IBM_COS_ACCESS_KEY_ID: <IBM COS ACCESS KEY ID ENCODED IN BASE64> IBM_COS_SECRET_ACCESS_KEY: <IBM COS SECRET ACCESS KEY ENCODED IN BASE64>", "apiVersion: noobaa.io/v1alpha1 kind: BackingStore metadata: finalizers: - noobaa.io/finalizer labels: app: noobaa name: bs namespace: openshift-storage spec: ibmCos: endpoint: <endpoint> secret: name: <backingstore-secret-name> namespace: openshift-storage targetBucket: <bucket-name> type: ibm-cos", "noobaa backingstore create azure-blob <backingstore_name> --account-key=<AZURE ACCOUNT KEY> --account-name=<AZURE ACCOUNT NAME> --target-blob-container <blob container name> -n openshift-storage", "INFO[0001] ✅ Exists: NooBaa \"noobaa\" INFO[0002] ✅ Created: BackingStore \"azure-resource\" INFO[0002] ✅ Created: Secret \"backing-store-secret-azure-resource\"", "apiVersion: v1 kind: Secret metadata: name: <backingstore-secret-name> type: Opaque data: AccountName: <AZURE ACCOUNT NAME ENCODED IN BASE64> AccountKey: <AZURE ACCOUNT KEY ENCODED IN BASE64>", "apiVersion: noobaa.io/v1alpha1 kind: BackingStore metadata: finalizers: - noobaa.io/finalizer labels: app: noobaa name: bs namespace: openshift-storage spec: azureBlob: secret: name: <backingstore-secret-name> namespace: openshift-storage targetBlobContainer: <blob-container-name> type: azure-blob", "noobaa backingstore create google-cloud-storage <backingstore_name> --private-key-json-file=<PATH TO GCP PRIVATE KEY JSON FILE> --target-bucket <GCP bucket name> -n openshift-storage", "INFO[0001] ✅ Exists: NooBaa \"noobaa\" INFO[0002] ✅ Created: BackingStore \"google-gcp\" INFO[0002] 
✅ Created: Secret \"backing-store-google-cloud-storage-gcp\"", "apiVersion: v1 kind: Secret metadata: name: <backingstore-secret-name> type: Opaque data: GoogleServiceAccountPrivateKeyJson: <GCP PRIVATE KEY ENCODED IN BASE64>", "apiVersion: noobaa.io/v1alpha1 kind: BackingStore metadata: finalizers: - noobaa.io/finalizer labels: app: noobaa name: bs namespace: openshift-storage spec: googleCloudStorage: secret: name: <backingstore-secret-name> namespace: openshift-storage targetBucket: <target bucket> type: google-cloud-storage", "noobaa -n openshift-storage backingstore create pv-pool <backingstore_name> --num-volumes <NUMBER OF VOLUMES> --pv-size-gb <VOLUME SIZE> --request-cpu <CPU REQUEST> --request-memory <MEMORY REQUEST> --limit-cpu <CPU LIMIT> --limit-memory <MEMORY LIMIT> --storage-class <LOCAL STORAGE CLASS>", "apiVersion: noobaa.io/v1alpha1 kind: BackingStore metadata: finalizers: - noobaa.io/finalizer labels: app: noobaa name: <backingstore_name> namespace: openshift-storage spec: pvPool: numVolumes: <NUMBER OF VOLUMES> resources: requests: storage: <VOLUME SIZE> cpu: <CPU REQUEST> memory: <MEMORY REQUEST> limits: cpu: <CPU LIMIT> memory: <MEMORY LIMIT> storageClass: <LOCAL STORAGE CLASS> type: pv-pool", "INFO[0001] ✅ Exists: NooBaa \"noobaa\" INFO[0002] ✅ Exists: BackingStore \"local-mcg-storage\"", "noobaa backingstore create s3-compatible rgw-resource --access-key=<RGW ACCESS KEY> --secret-key=<RGW SECRET KEY> --target-bucket=<bucket-name> --endpoint=<RGW endpoint> -n openshift-storage", "get secret <RGW USER SECRET NAME> -o yaml -n openshift-storage", "INFO[0001] ✅ Exists: NooBaa \"noobaa\" INFO[0002] ✅ Created: BackingStore \"rgw-resource\" INFO[0002] ✅ Created: Secret \"backing-store-secret-rgw-resource\"", "apiVersion: ceph.rook.io/v1 kind: CephObjectStoreUser metadata: name: <RGW-Username> namespace: openshift-storage spec: store: ocs-storagecluster-cephobjectstore displayName: \"<Display-name>\"", "apiVersion: noobaa.io/v1alpha1 kind: BackingStore metadata: finalizers: - noobaa.io/finalizer labels: app: noobaa name: <backingstore-name> namespace: openshift-storage spec: s3Compatible: endpoint: <RGW endpoint> secret: name: <backingstore-secret-name> namespace: openshift-storage signatureVersion: v4 targetBucket: <RGW-bucket-name> type: s3-compatible", "apiVersion: v1 kind: Secret metadata: name: <namespacestore-secret-name> type: Opaque data: AWS_ACCESS_KEY_ID: <AWS ACCESS KEY ID ENCODED IN BASE64> AWS_SECRET_ACCESS_KEY: <AWS SECRET ACCESS KEY ENCODED IN BASE64>", "apiVersion: noobaa.io/v1alpha1 kind: NamespaceStore metadata: finalizers: - noobaa.io/finalizer labels: app: noobaa name: <resource-name> namespace: openshift-storage spec: awsS3: secret: name: <namespacestore-secret-name> namespace: <namespace-secret> targetBucket: <target-bucket> type: aws-s3", "apiVersion: noobaa.io/v1alpha1 kind: BucketClass metadata: labels: app: noobaa name: <my-bucket-class> namespace: openshift-storage spec: namespacePolicy: type: single: resource: <resource>", "apiVersion: noobaa.io/v1alpha1 kind: BucketClass metadata: labels: app: noobaa name: <my-bucket-class> namespace: openshift-storage spec: namespacePolicy: type: Multi multi: writeResource: <write-resource> readResources: - <read-resources> - <read-resources>", "apiVersion: objectbucket.io/v1alpha1 kind: ObjectBucketClaim metadata: name: <resource-name> namespace: openshift-storage spec: generateBucketName: <my-bucket> storageClassName: openshift-storage.noobaa.io additionalConfig: bucketclass: <my-bucket-class>", "apiVersion: v1 
kind: Secret metadata: name: <namespacestore-secret-name> type: Opaque data: IBM_COS_ACCESS_KEY_ID: <IBM COS ACCESS KEY ID ENCODED IN BASE64> IBM_COS_SECRET_ACCESS_KEY: <IBM COS SECRET ACCESS KEY ENCODED IN BASE64>", "apiVersion: noobaa.io/v1alpha1 kind: NamespaceStore metadata: finalizers: - noobaa.io/finalizer labels: app: noobaa name: bs namespace: openshift-storage spec: s3Compatible: endpoint: <IBM COS ENDPOINT> secret: name: <namespacestore-secret-name> namespace: <namespace-secret> signatureVersion: v2 targetBucket: <target-bucket> type: ibm-cos", "apiVersion: noobaa.io/v1alpha1 kind: BucketClass metadata: labels: app: noobaa name: <my-bucket-class> namespace: openshift-storage spec: namespacePolicy: type: single: resource: <resource>", "apiVersion: noobaa.io/v1alpha1 kind: BucketClass metadata: labels: app: noobaa name: <my-bucket-class> namespace: openshift-storage spec: namespacePolicy: type: Multi multi: writeResource: <write-resource> readResources: - <read-resources> - <read-resources>", "apiVersion: objectbucket.io/v1alpha1 kind: ObjectBucketClaim metadata: name: <resource-name> namespace: openshift-storage spec: generateBucketName: <my-bucket> storageClassName: openshift-storage.noobaa.io additionalConfig: bucketclass: <my-bucket-class>", "noobaa namespacestore create aws-s3 <namespacestore> --access-key <AWS ACCESS KEY> --secret-key <AWS SECRET ACCESS KEY> --target-bucket <bucket-name> -n openshift-storage", "noobaa bucketclass create namespace-bucketclass single <my-bucket-class> --resource <resource> -n openshift-storage", "noobaa bucketclass create namespace-bucketclass multi <my-bucket-class> --write-resource <write-resource> --read-resources <read-resources> -n openshift-storage", "noobaa obc create my-bucket-claim -n openshift-storage --app-namespace my-app --bucketclass <custom-bucket-class>", "noobaa namespacestore create ibm-cos <namespacestore> --endpoint <IBM COS ENDPOINT> --access-key <IBM ACCESS KEY> --secret-key <IBM SECRET ACCESS KEY> --target-bucket <bucket-name> -n openshift-storage", "noobaa bucketclass create namespace-bucketclass single <my-bucket-class> --resource <resource> -n openshift-storage", "noobaa bucketclass create namespace-bucketclass multi <my-bucket-class> --write-resource <write-resource> --read-resources <read-resources> -n openshift-storage", "noobaa obc create my-bucket-claim -n openshift-storage --app-namespace my-app --bucketclass <custom-bucket-class>", "noobaa account create <noobaa-account-name> [flags]", "noobaa account create testaccount --full_permission --nsfs_account_config --gid 10001 --uid 10001 -default_resource fs_namespacestore", "NooBaaAccount spec: allow_bucket_creation: true Allowed_buckets: full_permission: true permission_list: [] default_resource: noobaa-default-namespace-store Nsfs_account_config: gid: 10001 new_buckets_path: / nsfs_only: true uid: 10001 INFO[0006] ✅ Exists: Secret \"noobaa-account-testaccount\" Connection info: AWS_ACCESS_KEY_ID : <aws-access-key-id> AWS_SECRET_ACCESS_KEY : <aws-secret-access-key>", "noobaa account list NAME ALLOWED_BUCKETS DEFAULT_RESOURCE PHASE AGE testaccount [*] noobaa-default-backing-store Ready 1m17s", "oc get noobaaaccount/testaccount -o yaml spec: allow_bucket_creation: true allowed_buckets: full_permission: true permission_list: [] default_resource: noobaa-default-namespace-store nsfs_account_config: gid: 10001 new_buckets_path: / nsfs_only: true uid: 10001", "oc get ns <application_namespace> -o yaml | grep scc", "oc get ns testnamespace -o yaml | grep scc 
openshift.io/sa.scc.mcs: s0:c26,c5 openshift.io/sa.scc.supplemental-groups: 1000660000/10000 openshift.io/sa.scc.uid-range: 1000660000/10000", "oc project <application_namespace>", "oc project testnamespace", "oc get pvc NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE cephfs-write-workload-generator-no-cache-pv-claim Bound pvc-aa58fb91-c3d2-475b-bbee-68452a613e1a 10Gi RWX ocs-storagecluster-cephfs 12s", "oc get pod NAME READY STATUS RESTARTS AGE cephfs-write-workload-generator-no-cache-1-cv892 1/1 Running 0 11s", "oc get pods <pod_name> -o jsonpath='{.spec.volumes[]}'", "oc get pods cephfs-write-workload-generator-no-cache-1-cv892 -o jsonpath='{.spec.volumes[]}' {\"name\":\"app-persistent-storage\",\"persistentVolumeClaim\":{\"claimName\":\"cephfs-write-workload-generator-no-cache-pv-claim\"}}", "oc get pods <pod_name> -o jsonpath='{.spec.containers[].volumeMounts}'", "oc get pods cephfs-write-workload-generator-no-cache-1-cv892 -o jsonpath='{.spec.containers[].volumeMounts}' [{\"mountPath\":\"/mnt/pv\",\"name\":\"app-persistent-storage\"},{\"mountPath\":\"/var/run/secrets/kubernetes.io/serviceaccount\",\"name\":\"kube-api-access-8tnc5\",\"readOnly\":true}]", "oc exec -it <pod_name> -- df <mount_path>", "oc exec -it cephfs-write-workload-generator-no-cache-1-cv892 -- df /mnt/pv main Filesystem 1K-blocks Used Available Use% Mounted on 172.30.202.87:6789,172.30.120.254:6789,172.30.77.247:6789:/volumes/csi/csi-vol-cc416d9e-dbf3-11ec-b286-0a580a810213/edcfe4d5-bdcb-4b8e-8824-8a03ad94d67c 10485760 0 10485760 0% /mnt/pv", "oc exec -it <pod_name> -- ls -latrZ <mount_path>", "oc exec -it cephfs-write-workload-generator-no-cache-1-cv892 -- ls -latrZ /mnt/pv/ total 567 drwxrwxrwx. 3 root root system_u:object_r:container_file_t:s0:c26,c5 2 May 25 06:35 . -rw-r--r--. 1 1000660000 root system_u:object_r:container_file_t:s0:c26,c5 580138 May 25 06:35 fs_write_cephfs-write-workload-generator-no-cache-1-cv892-data.log drwxrwxrwx. 
3 root root system_u:object_r:container_file_t:s0:c26,c5 30 May 25 06:35 ..", "oc get pv | grep <pv_name>", "oc get pv | grep pvc-aa58fb91-c3d2-475b-bbee-68452a613e1a pvc-aa58fb91-c3d2-475b-bbee-68452a613e1a 10Gi RWX Delete Bound testnamespace/cephfs-write-workload-generator-no-cache-pv-claim ocs-storagecluster-cephfs 47s", "oc get pv <pv_name> -o yaml", "oc get pv pvc-aa58fb91-c3d2-475b-bbee-68452a613e1a -o yaml apiVersion: v1 kind: PersistentVolume metadata: annotations: pv.kubernetes.io/provisioned-by: openshift-storage.cephfs.csi.ceph.com creationTimestamp: \"2022-05-25T06:27:49Z\" finalizers: - kubernetes.io/pv-protection name: pvc-aa58fb91-c3d2-475b-bbee-68452a613e1a resourceVersion: \"177458\" uid: 683fa87b-5192-4ccf-af2f-68c6bcf8f500 spec: accessModes: - ReadWriteMany capacity: storage: 10Gi claimRef: apiVersion: v1 kind: PersistentVolumeClaim name: cephfs-write-workload-generator-no-cache-pv-claim namespace: testnamespace resourceVersion: \"177453\" uid: aa58fb91-c3d2-475b-bbee-68452a613e1a csi: controllerExpandSecretRef: name: rook-csi-cephfs-provisioner namespace: openshift-storage driver: openshift-storage.cephfs.csi.ceph.com nodeStageSecretRef: name: rook-csi-cephfs-node namespace: openshift-storage volumeAttributes: clusterID: openshift-storage fsName: ocs-storagecluster-cephfilesystem storage.kubernetes.io/csiProvisionerIdentity: 1653458225664-8081-openshift-storage.cephfs.csi.ceph.com subvolumeName: csi-vol-cc416d9e-dbf3-11ec-b286-0a580a810213 subvolumePath: /volumes/csi/csi-vol-cc416d9e-dbf3-11ec-b286-0a580a810213/edcfe4d5-bdcb-4b8e-8824-8a03ad94d67c volumeHandle: 0001-0011-openshift-storage-0000000000000001-cc416d9e-dbf3-11ec-b286-0a580a810213 persistentVolumeReclaimPolicy: Delete storageClassName: ocs-storagecluster-cephfs volumeMode: Filesystem status: phase: Bound", "cat << EOF >> pv-openshift-storage.yaml apiVersion: v1 kind: PersistentVolume metadata: name: cephfs-pv-legacy-openshift-storage spec: storageClassName: \"\" accessModes: - ReadWriteMany capacity: storage: 10Gi 1 csi: driver: openshift-storage.cephfs.csi.ceph.com nodeStageSecretRef: name: rook-csi-cephfs-node namespace: openshift-storage volumeAttributes: # Volume Attributes can be copied from the Source testnamespace PV \"clusterID\": \"openshift-storage\" \"fsName\": \"ocs-storagecluster-cephfilesystem\" \"staticVolume\": \"true\" # rootpath is the subvolumePath: you copied from the Source testnamespace PV \"rootPath\": /volumes/csi/csi-vol-cc416d9e-dbf3-11ec-b286-0a580a810213/edcfe4d5-bdcb-4b8e-8824-8a03ad94d67c volumeHandle: 0001-0011-openshift-storage-0000000000000001-cc416d9e-dbf3-11ec-b286-0a580a810213-clone 2 persistentVolumeReclaimPolicy: Retain volumeMode: Filesystem --- apiVersion: v1 kind: PersistentVolumeClaim metadata: name: cephfs-pvc-legacy namespace: openshift-storage spec: storageClassName: \"\" accessModes: - ReadWriteMany resources: requests: storage: 10Gi 3 volumeMode: Filesystem # volumeName should be same as PV name volumeName: cephfs-pv-legacy-openshift-storage EOF", "oc create -f <YAML_file>", "oc create -f pv-openshift-storage.yaml persistentvolume/cephfs-pv-legacy-openshift-storage created persistentvolumeclaim/cephfs-pvc-legacy created", "oc get pvc -n openshift-storage NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE cephfs-pvc-legacy Bound cephfs-pv-legacy-openshift-storage 10Gi RWX 14s", "oc project openshift-storage Now using project \"openshift-storage\" on server \"https://api.cluster-5f6ng.5f6ng.sandbox65.opentlc.com:6443\".", "noobaa namespacestore create nsfs 
<nsfs_namespacestore> --pvc-name=' <cephfs_pvc_name> ' --fs-backend='CEPH_FS'", "noobaa namespacestore create nsfs legacy-namespace --pvc-name='cephfs-pvc-legacy' --fs-backend='CEPH_FS'", "oc exec -it <noobaa_endpoint_pod_name> -- df -h /nsfs/ <nsfs_namespacestore>", "oc exec -it noobaa-endpoint-5875f467f5-546c6 -- df -h /nsfs/legacy-namespace Filesystem Size Used Avail Use% Mounted on 172.30.202.87:6789,172.30.120.254:6789,172.30.77.247:6789:/volumes/csi/csi-vol-cc416d9e-dbf3-11ec-b286-0a580a810213/edcfe4d5-bdcb-4b8e-8824-8a03ad94d67c 10G 0 10G 0% /nsfs/legacy-namespace", "noobaa account create <user_account> --full_permission --allow_bucket_create=true --new_buckets_path='/' --nsfs_only=true --nsfs_account_config=true --gid <gid_number> --uid <uid_number> --default_resource='legacy-namespace'", "noobaa account create leguser --full_permission --allow_bucket_create=true --new_buckets_path='/' --nsfs_only=true --nsfs_account_config=true --gid 0 --uid 1000660000 --default_resource='legacy-namespace'", "oc exec -it <pod_name> -- mkdir <mount_path> /nsfs", "oc exec -it cephfs-write-workload-generator-no-cache-1-cv892 -- mkdir /mnt/pv/nsfs", "noobaa api bucket_api create_bucket '{ \"name\": \" <bucket_name> \", \"namespace\":{ \"write_resource\": { \"resource\": \" <nsfs_namespacestore> \", \"path\": \"nsfs/\" }, \"read_resources\": [ { \"resource\": \" <nsfs_namespacestore> \", \"path\": \"nsfs/\" }] } }'", "noobaa api bucket_api create_bucket '{ \"name\": \"legacy-bucket\", \"namespace\":{ \"write_resource\": { \"resource\": \"legacy-namespace\", \"path\": \"nsfs/\" }, \"read_resources\": [ { \"resource\": \"legacy-namespace\", \"path\": \"nsfs/\" }] } }'", "oc exec -it <noobaa_endpoint_pod_name> -n openshift-storage -- ls -ltraZ /nsfs/ <nsfs_namespacstore>", "oc exec -it noobaa-endpoint-5875f467f5-546c6 -n openshift-storage -- ls -ltraZ /nsfs/legacy-namespace total 567 drwxrwxrwx. 3 root root system_u:object_r:container_file_t:s0:c0,c26 2 May 25 06:35 . -rw-r--r--. 1 1000660000 root system_u:object_r:container_file_t:s0:c0,c26 580138 May 25 06:35 fs_write_cephfs-write-workload-generator-no-cache-1-cv892-data.log drwxrwxrwx. 3 root root system_u:object_r:container_file_t:s0:c0,c26 30 May 25 06:35 ..", "oc exec -it <pod_name> -- ls -latrZ <mount_path>", "oc exec -it cephfs-write-workload-generator-no-cache-1-cv892 -- ls -latrZ /mnt/pv/ total 567 drwxrwxrwx. 3 root root system_u:object_r:container_file_t:s0:c26,c5 2 May 25 06:35 . -rw-r--r--. 1 1000660000 root system_u:object_r:container_file_t:s0:c26,c5 580138 May 25 06:35 fs_write_cephfs-write-workload-generator-no-cache-1-cv892-data.log drwxrwxrwx. 
3 root root system_u:object_r:container_file_t:s0:c26,c5 30 May 25 06:35 ..", "noobaa bucket delete <bucket_name>", "noobaa bucket delete legacy-bucket", "noobaa account delete <user_account>", "noobaa account delete leguser", "noobaa namespacestore delete <nsfs_namespacestore>", "noobaa namespacestore delete legacy-namespace", "oc delete pv <cephfs_pv_name>", "oc delete pvc <cephfs_pvc_name>", "oc delete pv cephfs-pv-legacy-openshift-storage", "oc delete pvc cephfs-pvc-legacy", "oc get ns openshift-storage -o yaml | grep sa.scc.mcs openshift.io/sa.scc.mcs: s0:c26,c0", "oc edit ns <appplication_namespace>", "oc edit ns testnamespace", "oc get ns <application_namespace> -o yaml | grep sa.scc.mcs", "oc get ns testnamespace -o yaml | grep sa.scc.mcs openshift.io/sa.scc.mcs: s0:c26,c0", "cat << EOF >> scc.yaml allowHostDirVolumePlugin: false allowHostIPC: false allowHostNetwork: false allowHostPID: false allowHostPorts: false allowPrivilegeEscalation: true allowPrivilegedContainer: false allowedCapabilities: null apiVersion: security.openshift.io/v1 defaultAddCapabilities: null fsGroup: type: MustRunAs groups: - system:authenticated kind: SecurityContextConstraints metadata: annotations: name: restricted-pvselinux priority: null readOnlyRootFilesystem: false requiredDropCapabilities: - KILL - MKNOD - SETUID - SETGID runAsUser: type: MustRunAsRange seLinuxContext: seLinuxOptions: level: s0:c26,c0 type: MustRunAs supplementalGroups: type: RunAsAny users: [] volumes: - configMap - downwardAPI - emptyDir - persistentVolumeClaim - projected - secret EOF", "oc create -f scc.yaml", "oc create serviceaccount <service_account_name>", "oc create serviceaccount testnamespacesa", "oc adm policy add-scc-to-user restricted-pvselinux -z <service_account_name>", "oc adm policy add-scc-to-user restricted-pvselinux -z testnamespacesa", "oc patch dc/ <pod_name> '{\"spec\":{\"template\":{\"spec\":{\"serviceAccountName\": \" <service_account_name> \"}}}}'", "oc patch dc/cephfs-write-workload-generator-no-cache --patch '{\"spec\":{\"template\":{\"spec\":{\"serviceAccountName\": \"testnamespacesa\"}}}}'", "oc edit dc <pod_name> -n <application_namespace>", "spec: template: metadata: securityContext: seLinuxOptions: Level: <security_context_value>", "oc edit dc cephfs-write-workload-generator-no-cache -n testnamespace", "spec: template: metadata: securityContext: seLinuxOptions: level: s0:c26,c0", "oc get dc <pod_name> -n <application_namespace> -o yaml | grep -A 2 securityContext", "oc get dc cephfs-write-workload-generator-no-cache -n testnamespace -o yaml | grep -A 2 securityContext securityContext: seLinuxOptions: level: s0:c26,c0", "noobaa account passwd <noobaa_account_name> [options]", "noobaa account passwd FATA[0000] ❌ Missing expected arguments: <noobaa_account_name> Options: --new-password='': New Password for authentication - the best practice is to omit this flag , in that case the CLI will prompt to prompt and read it securely from the terminal to avoid leaking secrets in t he shell history --old-password='': Old Password for authentication - the best practice is to omit this flag , in that case the CLI will prompt to prompt and read it securely from the terminal to avoid leaking secrets in the shell history --retype-new-password='': Retype new Password for authentication - the best practice is to omit this flag , in that case the CLI will prompt to prompt and read it securely from the terminal to avoid leaking secrets in the shell history Usage: noobaa account passwd <noobaa-account-name> [flags] 
[options] Use \"noobaa options\" for a list of global command-line options (applies to all commands).", "noobaa account passwd [email protected]", "Enter old-password: [got 24 characters] Enter new-password: [got 7 characters] Enter retype-new-password: [got 7 characters] INFO[0017] ✅ Exists: Secret \"noobaa-admin\" INFO[0017] ✅ Exists: NooBaa \"noobaa\" INFO[0017] ✅ Exists: Service \"noobaa-mgmt\" INFO[0017] ✅ Exists: Secret \"noobaa-operator\" INFO[0017] ✅ Exists: Secret \"noobaa-admin\" INFO[0017] ✈\\ufe0f RPC: account.reset_password() Request: {Email:[email protected] VerificationPassword: * Password: *} WARN[0017] RPC: GetConnection creating connection to wss://localhost:58460/rpc/ 0xc000402ae0 INFO[0017] RPC: Connecting websocket (0xc000402ae0) &{RPC:0xc000501a40 Address:wss://localhost:58460/rpc/ State:init WS:<nil> PendingRequests:map[] NextRequestID:0 Lock:{state:1 sema:0} ReconnectDelay:0s cancelPings:<nil>} INFO[0017] RPC: Connected websocket (0xc000402ae0) &{RPC:0xc000501a40 Address:wss://localhost:58460/rpc/ State:init WS:<nil> PendingRequests:map[] NextRequestID:0 Lock:{state:1 sema:0} ReconnectDelay:0s cancelPings:<nil>} INFO[0020] ✅ RPC: account.reset_password() Response OK: took 2907.1ms INFO[0020] ✅ Updated: \"noobaa-admin\" INFO[0020] ✅ Successfully reset the password for the account \"[email protected]\"", "-------------------- - Mgmt Credentials - -------------------- email : [email protected] password : ***", "noobaa account list", "NAME ALLOWED_BUCKETS DEFAULT_RESOURCE PHASE AGE account-test [*] noobaa-default-backing-store Ready 14m17s test2 [first.bucket] noobaa-default-backing-store Ready 3m12s", "oc get noobaaaccount", "NAME PHASE AGE account-test Ready 15m test2 Ready 3m59s", "noobaa account regenerate <noobaa_account_name> [options]", "noobaa account regenerate FATA[0000] ❌ Missing expected arguments: <noobaa-account-name> Usage: noobaa account regenerate <noobaa-account-name> [flags] [options] Use \"noobaa options\" for a list of global command-line options (applies to all commands).", "noobaa account regenerate account-test", "INFO[0000] You are about to regenerate an account's security credentials. INFO[0000] This will invalidate all connections between S3 clients and NooBaa which are connected using the current credentials. INFO[0000] are you sure? y/n", "INFO[0015] ✅ Exists: Secret \"noobaa-account-account-test\" Connection info: AWS_ACCESS_KEY_ID : *** AWS_SECRET_ACCESS_KEY : ***", "noobaa obc list", "NAMESPACE NAME BUCKET-NAME STORAGE-CLASS BUCKET-CLASS PHASE default obc-test obc-test-35800e50-8978-461f-b7e0-7793080e26ba default.noobaa.io noobaa-default-bucket-class Bound", "oc get obc", "NAME STORAGE-CLASS PHASE AGE obc-test default.noobaa.io Bound 38s", "noobaa obc regenerate <bucket_claim_name> [options]", "noobaa obc regenerate FATA[0000] ❌ Missing expected arguments: <bucket-claim-name> Usage: noobaa obc regenerate <bucket-claim-name> [flags] [options] Use \"noobaa options\" for a list of global command-line options (applies to all commands).", "noobaa obc regenerate obc-test", "INFO[0000] You are about to regenerate an OBC's security credentials. INFO[0000] This will invalidate all connections between S3 clients and NooBaa which are connected using the current credentials. INFO[0000] are you sure? 
y/n", "INFO[0022] ✅ RPC: bucket.read_bucket() Response OK: took 95.4ms ObjectBucketClaim info: Phase : Bound ObjectBucketClaim : kubectl get -n default objectbucketclaim obc-test ConfigMap : kubectl get -n default configmap obc-test Secret : kubectl get -n default secret obc-test ObjectBucket : kubectl get objectbucket obc-default-obc-test StorageClass : kubectl get storageclass default.noobaa.io BucketClass : kubectl get -n default bucketclass noobaa-default-bucket-class Connection info: BUCKET_HOST : s3.default.svc BUCKET_NAME : obc-test-35800e50-8978-461f-b7e0-7793080e26ba BUCKET_PORT : 443 AWS_ACCESS_KEY_ID : *** AWS_SECRET_ACCESS_KEY : *** Shell commands: AWS S3 Alias : alias s3='AWS_ACCESS_KEY_ID=*** AWS_SECRET_ACCESS_KEY =*** aws s3 --no-verify-ssl --endpoint-url ***' Bucket status: Name : obc-test-35800e50-8978-461f-b7e0-7793080e26ba Type : REGULAR Mode : OPTIMAL ResiliencyStatus : OPTIMAL QuotaStatus : QUOTA_NOT_SET Num Objects : 0 Data Size : 0.000 B Data Size Reduced : 0.000 B Data Space Avail : 13.261 GB Num Objects Avail : 9007199254740991", "oc edit noobaa -n openshift-storage noobaa", "spec: loadBalancerSourceSubnets: s3: [\"10.0.0.0/16\", \"192.168.10.0/32\"] sts: - \"10.0.0.0/16\" - \"192.168.10.0/32\"", "oc get svc -n openshift-storage <s3 | sts> -o=go-template='{{ .spec.loadBalancerSourceRanges }}'", "noobaa bucketclass create placement-bucketclass mirror-to-aws --backingstores=azure-resource,aws-resource --placement Mirror", "noobaa obc create mirrored-bucket --bucketclass=mirror-to-aws", "apiVersion: noobaa.io/v1alpha1 kind: BucketClass metadata: labels: app: noobaa name: <bucket-class-name> namespace: openshift-storage spec: placementPolicy: tiers: - backingStores: - <backing-store-1> - <backing-store-2> placement: Mirror", "additionalConfig: bucketclass: mirror-to-aws", "{ \"Version\": \"NewVersion\", \"Statement\": [ { \"Sid\": \"Example\", \"Effect\": \"Allow\", \"Principal\": [ \"[email protected]\" ], \"Action\": [ \"s3:GetObject\" ], \"Resource\": [ \"arn:aws:s3:::john_bucket\" ] } ] }", "aws --endpoint ENDPOINT --no-verify-ssl s3api put-bucket-policy --bucket MyBucket --policy file:// BucketPolicy", "aws --endpoint https://s3-openshift-storage.apps.gogo44.noobaa.org --no-verify-ssl s3api put-bucket-policy -bucket MyBucket --policy file://BucketPolicy", "noobaa account create <noobaa-account-name> [--allow_bucket_create=true] [--allowed_buckets=[]] [--default_resource=''] [--full_permission=false]", "noobaa obc create <bucket-claim-name> -n openshift-storage --replication-policy /path/to/json-file.json", "[{ \"rule_id\": \"rule-1\", \"destination_bucket\": \"first.bucket\", \"filter\": {\"prefix\": \"repl\"}}]", "noobaa obc create my-bucket-claim -n openshift-storage --replication-policy /path/to/json-file.json", "apiVersion: objectbucket.io/v1alpha1 kind: ObjectBucketClaim metadata: name: <desired-bucket-claim> namespace: <desired-namespace> spec: generateBucketName: <desired-bucket-name> storageClassName: openshift-storage.noobaa.io additionalConfig: replicationPolicy: {\"rules\": [{ \"rule_id\": \"\", \"destination_bucket\": \"\", \"filter\": {\"prefix\": \"\"}}]}", "noobaa -n openshift-storage bucketclass create placement-bucketclass <bucketclass-name> --backingstores <backingstores> --replication-policy=/path/to/json-file.json", "[{ \"rule_id\": \"rule-1\", \"destination_bucket\": \"first.bucket\", \"filter\": {\"prefix\": \"repl\"}}]", "noobaa -n openshift-storage bucketclass create placement-bucketclass bc --backingstores azure-blob-ns 
--replication-policy=/path/to/json-file.json", "apiVersion: noobaa.io/v1alpha1 kind: BucketClass metadata: labels: app: <desired-app-label> name: <desired-bucketclass-name> namespace: <desired-namespace> spec: placementPolicy: tiers: - backingstores: - <backingstore> placement: Spread replicationPolicy: [{ \"rule_id\": \" <rule id> \", \"destination_bucket\": \"first.bucket\", \"filter\": {\"prefix\": \" <object name prefix> \"}}]", "replicationPolicy: '{\"rules\":[{\"rule_id\":\"<RULE ID>\", \"destination_bucket\":\"<DEST>\", \"filter\": {\"prefix\": \"<PREFIX>\"}}], \"log_replication_info\": {\"logs_location\": {\"logs_bucket\": \"<LOGS_BUCKET>\"}}}'", "apiVersion: v1 kind: Secret metadata: name: <namespacestore-secret-name> type: Opaque data: TenantID: <AZURE TENANT ID ENCODED IN BASE64> ApplicationID: <AZURE APPLICATIOM ID ENCODED IN BASE64> ApplicationSecret: <AZURE APPLICATION SECRET ENCODED IN BASE64> LogsAnalyticsWorkspaceID: <AZURE LOG ANALYTICS WORKSPACE ID ENCODED IN BASE64> AccountName: <AZURE ACCOUNT NAME ENCODED IN BASE64> AccountKey: <AZURE ACCOUNT KEY ENCODED IN BASE64>", "replicationPolicy:'{\"rules\":[ {\"rule_id\":\"ID goes here\", \"sync_deletions\": \"<true or false>\"\", \"destination_bucket\":object bucket name\"} ], \"log_replication_info\":{\"endpoint_type\":\"AZURE\"}}'", "nb bucket create data.bucket", "nb bucket create log.bucket", "nb api bucket_api put_bucket_logging '{ \"name\": \"data.bucket\", \"log_bucket\": \"log.bucket\", \"log_prefix\": \"data-bucket-logs\" }'", "nb api bucket_api get_bucket_logging '{ \"name\": \"data.bucket\" }'", "s3_alias cp s3://logs.bucket/data-bucket-logs/logs.bucket.bucket_data-bucket-logs_1719230150.log - | tail -n 2 Jun 24 14:00:02 10-XXX-X-XXX.sts.openshift-storage.svc.cluster.local {\"noobaa_bucket_logging\":\"true\",\"op\":\"GET\",\"bucket_owner\":\"[email protected]\",\"source_bucket\":\"data.bucket\",\"object_key\":\"/data.bucket?list-type=2&prefix=data-bucket-logs&delimiter=%2F&encoding-type=url\",\"log_bucket\":\"logs.bucket\",\"remote_ip\":\"100.XX.X.X\",\"request_uri\":\"/data.bucket?list-type=2&prefix=data-bucket-logs&delimiter=%2F&encoding-type=url\",\"request_id\":\"luv2XXXX-ctyg2k-12gs\"} Jun 24 14:00:06 10-XXX-X-XXX.s3.openshift-storage.svc.cluster.local {\"noobaa_bucket_logging\":\"true\",\"op\":\"PUT\",\"bucket_owner\":\"[email protected]\",\"source_bucket\":\"data.bucket\",\"object_key\":\"/data.bucket/B69EC83F-0177-44D8-A8D1-4A10C5A5AB0F.file\",\"log_bucket\":\"logs.bucket\",\"remote_ip\":\"100.XX.X.X\",\"request_uri\":\"/data.bucket/B69EC83F-0177-44D8-A8D1-4A10C5A5AB0F.file\",\"request_id\":\"luv2XXXX-9syea5-x5z\"}", "nb api bucket_api delete_bucket_logging '{ \"name\": \"data.bucket\" }'", "apiVersion: objectbucket.io/v1alpha1 kind: ObjectBucketClaim metadata: name: <obc-name> spec: generateBucketName: <obc-bucket-name> storageClassName: openshift-storage.noobaa.io", "apiVersion: batch/v1 kind: Job metadata: name: testjob spec: template: spec: restartPolicy: OnFailure containers: - image: <your application image> name: test env: - name: BUCKET_NAME valueFrom: configMapKeyRef: name: <obc-name> key: BUCKET_NAME - name: BUCKET_HOST valueFrom: configMapKeyRef: name: <obc-name> key: BUCKET_HOST - name: BUCKET_PORT valueFrom: configMapKeyRef: name: <obc-name> key: BUCKET_PORT - name: AWS_ACCESS_KEY_ID valueFrom: secretKeyRef: name: <obc-name> key: AWS_ACCESS_KEY_ID - name: AWS_SECRET_ACCESS_KEY valueFrom: secretKeyRef: name: <obc-name> key: AWS_SECRET_ACCESS_KEY", "oc apply -f <yaml.file>", "oc get cm <obc-name> 
-o yaml", "oc get secret <obc_name> -o yaml", "noobaa obc create <obc-name> -n openshift-storage", "INFO[0001] ✅ Created: ObjectBucketClaim \"test21obc\"", "oc get obc -n openshift-storage", "NAME STORAGE-CLASS PHASE AGE test21obc openshift-storage.noobaa.io Bound 38s", "oc get obc test21obc -o yaml -n openshift-storage", "apiVersion: objectbucket.io/v1alpha1 kind: ObjectBucketClaim metadata: creationTimestamp: \"2019-10-24T13:30:07Z\" finalizers: - objectbucket.io/finalizer generation: 2 labels: app: noobaa bucket-provisioner: openshift-storage.noobaa.io-obc noobaa-domain: openshift-storage.noobaa.io name: test21obc namespace: openshift-storage resourceVersion: \"40756\" selfLink: /apis/objectbucket.io/v1alpha1/namespaces/openshift-storage/objectbucketclaims/test21obc uid: 64f04cba-f662-11e9-bc3c-0295250841af spec: ObjectBucketName: obc-openshift-storage-test21obc bucketName: test21obc-933348a6-e267-4f82-82f1-e59bf4fe3bb4 generateBucketName: test21obc storageClassName: openshift-storage.noobaa.io status: phase: Bound", "oc get -n openshift-storage secret test21obc -o yaml", "apiVersion: v1 data: AWS_ACCESS_KEY_ID: c0M0R2xVanF3ODR3bHBkVW94cmY= AWS_SECRET_ACCESS_KEY: Wi9kcFluSWxHRzlWaFlzNk1hc0xma2JXcjM1MVhqa051SlBleXpmOQ== kind: Secret metadata: creationTimestamp: \"2019-10-24T13:30:07Z\" finalizers: - objectbucket.io/finalizer labels: app: noobaa bucket-provisioner: openshift-storage.noobaa.io-obc noobaa-domain: openshift-storage.noobaa.io name: test21obc namespace: openshift-storage ownerReferences: - apiVersion: objectbucket.io/v1alpha1 blockOwnerDeletion: true controller: true kind: ObjectBucketClaim name: test21obc uid: 64f04cba-f662-11e9-bc3c-0295250841af resourceVersion: \"40751\" selfLink: /api/v1/namespaces/openshift-storage/secrets/test21obc uid: 65117c1c-f662-11e9-9094-0a5305de57bb type: Opaque", "oc get -n openshift-storage cm test21obc -o yaml", "apiVersion: v1 data: BUCKET_HOST: 10.0.171.35 BUCKET_NAME: test21obc-933348a6-e267-4f82-82f1-e59bf4fe3bb4 BUCKET_PORT: \"31242\" BUCKET_REGION: \"\" BUCKET_SUBREGION: \"\" kind: ConfigMap metadata: creationTimestamp: \"2019-10-24T13:30:07Z\" finalizers: - objectbucket.io/finalizer labels: app: noobaa bucket-provisioner: openshift-storage.noobaa.io-obc noobaa-domain: openshift-storage.noobaa.io name: test21obc namespace: openshift-storage ownerReferences: - apiVersion: objectbucket.io/v1alpha1 blockOwnerDeletion: true controller: true kind: ObjectBucketClaim name: test21obc uid: 64f04cba-f662-11e9-bc3c-0295250841af resourceVersion: \"40752\" selfLink: /api/v1/namespaces/openshift-storage/configmaps/test21obc uid: 651c6501-f662-11e9-9094-0a5305de57bb", "noobaa namespacestore create aws-s3 <namespacestore> --access-key <AWS ACCESS KEY> --secret-key <AWS SECRET ACCESS KEY> --target-bucket <bucket-name>", "apiVersion: v1 kind: Secret metadata: name: <namespacestore-secret-name> type: Opaque data: AWS_ACCESS_KEY_ID: <AWS ACCESS KEY ID ENCODED IN BASE64> AWS_SECRET_ACCESS_KEY: <AWS SECRET ACCESS KEY ENCODED IN BASE64>", "apiVersion: noobaa.io/v1alpha1 kind: NamespaceStore metadata: finalizers: - noobaa.io/finalizer labels: app: noobaa name: <namespacestore> namespace: openshift-storage spec: awsS3: secret: name: <namespacestore-secret-name> namespace: <namespace-secret> targetBucket: <target-bucket> type: aws-s3", "noobaa bucketclass create namespace-bucketclass cache <my-cache-bucket-class> --backingstores <backing-store> --hub-resource <namespacestore>", "noobaa obc create <my-bucket-claim> my-app --bucketclass <custom-bucket-class>", 
"noobaa namespacestore create ibm-cos <namespacestore> --endpoint <IBM COS ENDPOINT> --access-key <IBM ACCESS KEY> --secret-key <IBM SECRET ACCESS KEY> --target-bucket <bucket-name>", "apiVersion: v1 kind: Secret metadata: name: <namespacestore-secret-name> type: Opaque data: IBM_COS_ACCESS_KEY_ID: <IBM COS ACCESS KEY ID ENCODED IN BASE64> IBM_COS_SECRET_ACCESS_KEY: <IBM COS SECRET ACCESS KEY ENCODED IN BASE64>", "apiVersion: noobaa.io/v1alpha1 kind: NamespaceStore metadata: finalizers: - noobaa.io/finalizer labels: app: noobaa name: <namespacestore> namespace: openshift-storage spec: s3Compatible: endpoint: <IBM COS ENDPOINT> secret: name: <backingstore-secret-name> namespace: <namespace-secret> signatureVersion: v2 targetBucket: <target-bucket> type: ibm-cos", "noobaa bucketclass create namespace-bucketclass cache <my-bucket-class> --backingstores <backing-store> --hubResource <namespacestore>", "noobaa obc create <my-bucket-claim> my-app --bucketclass <custom-bucket-class>", "oc patch -n openshift-storage storagecluster ocs-storagecluster --type merge --patch '{\"spec\": {\"multiCloudGateway\": {\"endpoints\": {\"minCount\": 3,\"maxCount\": 10}}}}'", "spec: pvPool: resources: limits: cpu: 1000m memory: 4000Mi requests: cpu: 800m memory: 800Mi storage: 50Gi", "oc get secrets/<secret_name> -o jsonpath='{.data..tls\\.crt}' | base64 -d oc get secrets/<secret_name> -o jsonpath='{.data..tls\\.key}' | base64 -d", "oc get secrets/<secret_name> -o jsonpath='{.data.cert}' | base64 -d", "oc get secret <object bucket claim name> -o jsonpath='{.data.AWS_SECRET_ACCESS_KEY}' | base64 --decode oc get secret <object bucket claim name> -o jsonpath='{.data.AWS_ACCESS_KEY_ID}' | base64 --decode", "oc get cm <object bucket claim name> -o jsonpath='{.data.BUCKET_HOST}' oc get cm <object bucket claim name> -o jsonpath='{.data.BUCKET_PORT}' oc get cm <object bucket claim name> -o jsonpath='{.data.BUCKET_NAME}'", "oc get secret rook-ceph-object-user-<object-store-cr-name>-<object-user-cr-name> -o jsonpath='{.data.AccessKey}' | base64 --decode oc get secret rook-ceph-object-user-<object-store-cr-name>-<object-user-cr-name> -o jsonpath='{.data.SecretKey}' | base64 --decode oc get secret rook-ceph-object-user-<object-store-cr-name>-<object-user-cr-name> -o jsonpath='{.data.Endpoint}' | base64 --decode" ]
https://docs.redhat.com/en/documentation/red_hat_openshift_data_foundation/4.16/html-single/managing_hybrid_and_multicloud_resources/index
Appendix C. Publishing Module Reference
Appendix C. Publishing Module Reference Several publisher, mapper, and rule modules are configured by default with the Certificate Manager. Section C.1, "Publisher Plug-in Modules" Section C.2, "Mapper Plug-in Modules " Section C.3, "Rule Instances" C.1. Publisher Plug-in Modules This section describes the publisher modules provided for the Certificate Manager. The modules are used by the Certificate Manager to enable and configure specific publisher instances. Section C.1.1, "FileBasedPublisher" Section C.1.2, "LdapCaCertPublisher" Section C.1.3, "LdapUserCertPublisher" Section C.1.4, "LdapCrlPublisher" Section C.1.5, "LdapDeltaCrlPublisher" Section C.1.6, "LdapCertificatePairPublisher" Section C.1.7, "OCSPPublisher" C.1.1. FileBasedPublisher The FileBasedPublisher plug-in module configures a Certificate Manager to publish certificates and CRLs to file. This plug-in can publish base-64 encoded files, DER-encoded files, or both, depending on the checkboxes selected when the publisher is configured. The certificate and CRL content can be viewed by converting the files using the PrettyPrintCert and PrettyPrintCRL tools. For details on viewing the content in base-64 and DER-encoded certificates and CRLs, see Section 9.11, "Viewing Certificates and CRLs Published to File" . By default, the Certificate Manager does not create an instance of the FileBasedPublisher module. Table C.1. FileBasedPublisher Configuration Parameters Parameter Description Publisher ID Specifies a name for the publisher, an alphanumeric string with no spaces. For example, PublishCertsToFile . directory Specifies the complete path to the directory to which the Certificate Manager creates the files; the path can be an absolute path or can be relative to the Certificate System instance directory. For example, /export/CS/certificates . C.1.2. LdapCaCertPublisher The LdapCaCertPublisher plug-in module configures a Certificate Manager to publish or unpublish a CA certificate to the caCertificate;binary attribute of the CA's directory entry. The module converts the object class of the CA's entry to pkiCA or certificationAuthority , if it is not used already. Similarly, it also removes the pkiCA or certificationAuthority object class when unpublishing if the CA has no other certificates. During installation, the Certificate Manager automatically creates an instance of the LdapCaCertPublisher module for publishing the CA certificate to the directory. Table C.2. LdapCaCertPublisher Configuration Parameters Parameter Description caCertAttr Specifies the LDAP directory attribute to publish the CA certificate. This must be caCertificate;binary . caObjectClass Specifies the object class for the CA's entry in the directory. This must be pkiCA or certificationAuthority . C.1.3. LdapUserCertPublisher The LdapUserCertPublisher plug-in module configures a Certificate Manager to publish or unpublish a user certificate to the userCertificate;binary attribute of the user's directory entry. This module is used to publish any end-entity certificate to an LDAP directory. Types of end-entity certificates include SSL client, S/MIME, SSL server, and OCSP responder. During installation, the Certificate Manager automatically creates an instance of the LdapUserCertPublisher module for publishing end-entity certificates to the directory. Table C.3. LdapUserCertPublisher Configuration Parameters Parameter Description certAttr Specifies the directory attribute of the mapped entry to which the Certificate Manager should publish the certificate. 
This must be userCertificate;binary . C.1.4. LdapCrlPublisher The LdapCrlPublisher plug-in module configures a Certificate Manager to publish or unpublish the CRL to the certificateRevocationList;binary attribute of a directory entry. During installation, the Certificate Manager automatically creates an instance of the LdapCrlPublisher module for publishing CRLs to the directory. Table C.4. LdapCrlPublisher Configuration Parameters Parameter Description crlAttr Specifies the directory attribute of the mapped entry to which the Certificate Manager should publish the CRL. This must be certificateRevocationList;binary . C.1.5. LdapDeltaCrlPublisher The LdapDeltaCrlPublisher plug-in module configures a Certificate Manager to publish or unpublish a delta CRL to the deltaRevocationList attribute of a directory entry. During installation, the Certificate Manager automatically creates an instance of the LdapDeltaCrlPublisher module for publishing CRLs to the directory. Table C.5. LdapDeltaCrlPublisher Configuration Parameters Parameter Description crlAttr Specifies the directory attribute of the mapped entry to which the Certificate Manager should publish the delta CRL. This must be deltaRevocationList;binary . C.1.6. LdapCertificatePairPublisher The LdapCertificatePairPublisher plug-in module configures a Certificate Manager to publish or unpublish a cross-signed certificate to the crossCertPair;binary attribute of the CA's directory entry. The module also converts the object class of the CA's entry to a pkiCA or certificationAuthority , if it is not used already. Similarly, it also removes the pkiCA or certificationAuthority object class when unpublishing if the CA has no other certificates. During installation, the Certificate Manager automatically creates an instance of the LdapCertificatePairPublisher module named LdapCrossCertPairPublisher for publishing the cross-signed certificates to the directory. Table C.6. LdapCertificatePairPublisher Parameters Parameter Description crossCertPairAttr Specifies the LDAP directory attribute to publish the CA certificate. This must be crossCertificatePair;binary . caObjectClass Specifies the object class for the CA's entry in the directory. This must be pkiCA or certificationAuthority . C.1.7. OCSPPublisher The OCSPPublisher plug-in module configures a Certificate Manager to publish its CRLs to an Online Certificate Status Manager. The Certificate Manager does not create any instances of the OCSPPublisher module at installation. Table C.7. OCSPPublisher Parameters Parameter Description host Specifies the fully qualified hostname of the Online Certificate Status Manager. port Specifies the port number on which the Online Certificate Status Manager is listening to the Certificate Manager. This is the Online Certificate Status Manager's SSL port number. path Specifies the path for publishing the CRL. This must be the default path, /ocsp/agent/ocsp/addCRL . enableClientAuth Sets whether to use client (certificate-based) authentication to access the OCSP service. nickname Gives the nickname of the certificate in the OCSP service's database to use for client authentication. This is only used if the enableClientAuth option is set to true.
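As an illustration of how the OCSPPublisher parameters fit together, the following sketch shows a hypothetical publisher instance. The path value is the documented default; the instance name, host, port, and nickname are examples only, and the exact way the values are entered (through the Console or the instance configuration) depends on how the publisher instance is created:
Publisher ID: PublishCrlToOCSP
host: ocsp.example.com
port: 11443
path: /ocsp/agent/ocsp/addCRL
enableClientAuth: true
nickname: subsystemCert cert-pki-ca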
null
https://docs.redhat.com/en/documentation/red_hat_certificate_system/10/html/administration_guide/Publishing_Module_Reference
Chapter 15. Synchronize Sites
Chapter 15. Synchronize Sites 15.1. When to use this procedure Use this when the state of Data Grid clusters of two sites become disconnected and the contents of the caches are out-of-sync. Perform this for example after a split-brain or when one site has been taken offline for maintenance. At the end of the procedure, the data on the secondary site have been discarded and replaced by the data of the active site. All caches in the offline site are cleared to prevent invalid cache contents. 15.2. Procedures 15.2.1. Data Grid Cluster For the context of this chapter, site-a is the currently active site and site-b is an offline site that is not part of the AWS Global Accelerator EndpointGroup and is therefore not receiving user requests. Warning Transferring state may impact Data Grid cluster performance by increasing the response time and/or resources usage. The first procedure is to delete the stale data from the offline site. Login into the offline site. Shutdown Red Hat build of Keycloak. This will clear all Red Hat build of Keycloak caches and prevents the Red Hat build of Keycloak state from being out-of-sync with Data Grid. When deploying Red Hat build of Keycloak using the Red Hat build of Keycloak Operator, change the number of Red Hat build of Keycloak instances in the Red Hat build of Keycloak Custom Resource to 0. Connect into Data Grid Cluster using the Data Grid CLI tool: Command: oc -n keycloak exec -it pods/infinispan-0 -- ./bin/cli.sh --trustall --connect https://127.0.0.1:11222 It asks for the username and password for the Data Grid cluster. Those credentials are the one set in the Deploy Data Grid for HA with the Data Grid Operator chapter in the configuring credentials section. Output: Username: developer Password: [infinispan-0-29897@ISPN//containers/default]> Note The pod name depends on the cluster name defined in the Data Grid CR. The connection can be done with any pod in the Data Grid cluster. Disable the replication from offline site to the active site by running the following command. It prevents the clear request to reach the active site and delete all the correct cached data. Command: site take-offline --all-caches --site=site-a Output: { "authenticationSessions" : "ok", "work" : "ok", "loginFailures" : "ok", "actionTokens" : "ok" } Check the replication status is offline . Command: site status --all-caches --site=site-a Output: { "status" : "offline" } If the status is not offline , repeat the step. Warning Make sure the replication is offline otherwise the clear data will clear both sites. Clear all the cached data in offline site using the following commands: Command: clearcache actionTokens clearcache authenticationSessions clearcache loginFailures clearcache work These commands do not print any output. Re-enable the cross-site replication from offline site to the active site. Command: site bring-online --all-caches --site=site-a Output: { "authenticationSessions" : "ok", "work" : "ok", "loginFailures" : "ok", "actionTokens" : "ok" } Check the replication status is online . Command: site status --all-caches --site=site-a Output: { "status" : "online" } Now we are ready to transfer the state from the active site to the offline site. Login into your Active site Connect into Data Grid Cluster using the Data Grid CLI tool: Command: oc -n keycloak exec -it pods/infinispan-0 -- ./bin/cli.sh --trustall --connect https://127.0.0.1:11222 It asks for the username and password for the Data Grid cluster. 
Those credentials are the one set in the Deploy Data Grid for HA with the Data Grid Operator chapter in the configuring credentials section. Output: Username: developer Password: [infinispan-0-29897@ISPN//containers/default]> Note The pod name depends on the cluster name defined in the Data Grid CR. The connection can be done with any pod in the Data Grid cluster. Trigger the state transfer from the active site to the offline site. Command: site push-site-state --all-caches --site=site-b Output: { "authenticationSessions" : "ok", "work" : "ok", "loginFailures" : "ok", "actionTokens" : "ok" } Check the replication status is online for all caches. Command: site status --all-caches --site=site-b Output: { "status" : "online" } Wait for the state transfer to complete by checking the output of push-site-status command for all caches. Command: site push-site-status --cache=actionTokens site push-site-status --cache=authenticationSessions site push-site-status --cache=loginFailures site push-site-status --cache=work Output: { "site-b" : "OK" } { "site-b" : "OK" } { "site-b" : "OK" } { "site-b" : "OK" } Check the table in this section for the Cross-Site Documentation for the possible status values. If an error is reported, repeat the state transfer for that specific cache. Command: site push-site-state --cache=<cache-name> --site=site-b Clear/reset the state transfer status with the following command Command: site clear-push-site-status --cache=actionTokens site clear-push-site-status --cache=authenticationSessions site clear-push-site-status --cache=loginFailures site clear-push-site-status --cache=work Output: "ok" "ok" "ok" "ok" Now the state is available in the offline site, Red Hat build of Keycloak can be started again: Login into your secondary site. Startup Red Hat build of Keycloak. When deploying Red Hat build of Keycloak using the Red Hat build of Keycloak Operator, change the number of Red Hat build of Keycloak instances in the Red Hat build of Keycloak Custom Resource to the original value. 15.2.2. AWS Aurora Database No action required. 15.2.3. AWS Global Accelerator Once the two sites have been synchronized, it is safe to add the previously offline site back to the Global Accelerator EndpointGroup following the steps in the Bring site online chapter. 15.3. Further reading See Concepts to automate Data Grid CLI commands .
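The shutdown and startup steps above change the number of Red Hat build of Keycloak instances in the Custom Resource; a minimal CLI sketch, assuming the CR is named keycloak in the keycloak namespace (both names are illustrative):
Command: oc -n keycloak patch keycloak/keycloak --type merge -p '{"spec":{"instances":0}}'
Command: oc -n keycloak patch keycloak/keycloak --type merge -p '{"spec":{"instances":3}}'
Run the first command before clearing the caches on the offline site, and the second command, with your original instance count (3 is only an example), once the state transfer has completed.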
[ "-n keycloak exec -it pods/infinispan-0 -- ./bin/cli.sh --trustall --connect https://127.0.0.1:11222", "Username: developer Password: [infinispan-0-29897@ISPN//containers/default]>", "site take-offline --all-caches --site=site-a", "{ \"authenticationSessions\" : \"ok\", \"work\" : \"ok\", \"loginFailures\" : \"ok\", \"actionTokens\" : \"ok\" }", "site status --all-caches --site=site-a", "{ \"status\" : \"offline\" }", "clearcache actionTokens clearcache authenticationSessions clearcache loginFailures clearcache work", "site bring-online --all-caches --site=site-a", "{ \"authenticationSessions\" : \"ok\", \"work\" : \"ok\", \"loginFailures\" : \"ok\", \"actionTokens\" : \"ok\" }", "site status --all-caches --site=site-a", "{ \"status\" : \"online\" }", "-n keycloak exec -it pods/infinispan-0 -- ./bin/cli.sh --trustall --connect https://127.0.0.1:11222", "Username: developer Password: [infinispan-0-29897@ISPN//containers/default]>", "site push-site-state --all-caches --site=site-b", "{ \"authenticationSessions\" : \"ok\", \"work\" : \"ok\", \"loginFailures\" : \"ok\", \"actionTokens\" : \"ok\" }", "site status --all-caches --site=site-b", "{ \"status\" : \"online\" }", "site push-site-status --cache=actionTokens site push-site-status --cache=authenticationSessions site push-site-status --cache=loginFailures site push-site-status --cache=work", "{ \"site-b\" : \"OK\" } { \"site-b\" : \"OK\" } { \"site-b\" : \"OK\" } { \"site-b\" : \"OK\" }", "site push-site-state --cache=<cache-name> --site=site-b", "site clear-push-site-status --cache=actionTokens site clear-push-site-status --cache=authenticationSessions site clear-push-site-status --cache=loginFailures site clear-push-site-status --cache=work", "\"ok\" \"ok\" \"ok\" \"ok\"" ]
https://docs.redhat.com/en/documentation/red_hat_build_of_keycloak/26.0/html/high_availability_guide/operate-synchronize-
Chapter 8. Deprecated features
Chapter 8. Deprecated features This section describes the features deprecated in Red Hat OpenShift Data Foundation 4.14. 8.1. Red Hat Virtualization Platform Starting with Red Hat OpenShift Data Foundation 4.14, OpenShift Data Foundation deployed on installer-provisioned infrastructure (IPI) installations of OpenShift Container Platform on Red Hat Virtualization (RHV) is no longer supported. For more information, see the OpenShift Container Platform 4.14 release notes.
null
https://docs.redhat.com/en/documentation/red_hat_openshift_data_foundation/4.14/html/4.14_release_notes/deprecated_features
Chapter 2. Deploy OpenShift Data Foundation using local storage devices
Chapter 2. Deploy OpenShift Data Foundation using local storage devices Deploying OpenShift Data Foundation on OpenShift Container Platform using local storage devices provides you with the option to create internal cluster resources. Follow this deployment method to use local storage to back persistent volumes for your OpenShift Container Platform applications. Use this section to deploy OpenShift Data Foundation on IBM Z infrastructure where OpenShift Container Platform is already installed. 2.1. Installing Red Hat OpenShift Data Foundation Operator You can install Red Hat OpenShift Data Foundation Operator using the Red Hat OpenShift Container Platform Operator Hub. Prerequisites Access to an OpenShift Container Platform cluster using an account with cluster-admin and operator installation permissions. You must have at least three worker or infrastructure nodes in the Red Hat OpenShift Container Platform cluster. Each node should include one disk and requires 3 disks (PVs). However, one PV remains eventually unused by default. This is an expected behavior. For additional resource requirements, see the Planning your deployment guide. Important When you need to override the cluster-wide default node selector for OpenShift Data Foundation, you can use the following command to specify a blank node selector for the openshift-storage namespace (create openshift-storage namespace in this case): Taint a node as infra to ensure only Red Hat OpenShift Data Foundation resources are scheduled on that node. This helps you save on subscription costs. For more information, see the How to use dedicated worker nodes for Red Hat OpenShift Data Foundation section in the Managing and Allocating Storage Resources guide. Procedure Log in to the OpenShift Web Console. Click Operators OperatorHub . Scroll or type OpenShift Data Foundation into the Filter by keyword box to find the OpenShift Data Foundation Operator. Click Install . Set the following options on the Install Operator page: Update Channel as stable-4.13 . Installation Mode as A specific namespace on the cluster . Installed Namespace as Operator recommended namespace openshift-storage . If Namespace openshift-storage does not exist, it is created during the operator installation. Select Approval Strategy as Automatic or Manual . If you select Automatic updates, then the Operator Lifecycle Manager (OLM) automatically upgrades the running instance of your Operator without any intervention. If you select Manual updates, then the OLM creates an update request. As a cluster administrator, you must then manually approve that update request to update the Operator to a newer version. Ensure that the Enable option is selected for the Console plugin . Click Install . Verification steps After the operator is successfully installed, a pop-up with a message, Web console update is available appears on the user interface. Click Refresh web console from this pop-up for the console changes to reflect. In the Web Console: Navigate to Installed Operators and verify that the OpenShift Data Foundation Operator shows a green tick indicating successful installation. Navigate to Storage and verify if the Data Foundation dashboard is available. 2.2. Installing Local Storage Operator Install the Local Storage Operator from the Operator Hub before creating Red Hat OpenShift Data Foundation clusters on local storage devices. Procedure Log in to the OpenShift Web Console. Click Operators OperatorHub . 
Type local storage in the Filter by keyword box to find the Local Storage Operator from the list of operators, and click on it. Set the following options on the Install Operator page: Update channel as either 4.13 or stable . Installation mode as A specific namespace on the cluster . Installed Namespace as Operator recommended namespace openshift-local-storage . Update approval as Automatic . Click Install . Verification steps Verify that the Local Storage Operator shows a green tick indicating successful installation. 2.3. Finding available storage devices (optional) This step is additional information and can be skipped as the disks are automatically discovered during storage cluster creation. Use this procedure to identify the device names for each of the three or more worker nodes that you have labeled with the OpenShift Data Foundation label cluster.ocs.openshift.io/openshift-storage='' before creating Persistent Volumes (PV) for IBM Z. Procedure List and verify the name of the worker nodes with the OpenShift Data Foundation label. Example output: Log in to each worker node that is used for OpenShift Data Foundation resources and find the unique by-id device name for each available raw block device. Example output: In this example, for bmworker01 , the available local device is sdb . Identify the unique ID for each of the devices selected in Step 2. In the above example, the ID for the local device sdb Repeat the above step to identify the device ID for all the other nodes that have the storage devices to be used by OpenShift Data Foundation. See this Knowledge Base article for more details. 2.4. Creating OpenShift Data Foundation cluster on IBM Z Use this procedure to create an OpenShift Data Foundation cluster on IBM Z. Prerequisites Ensure that all the requirements in the Requirements for installing OpenShift Data Foundation using local storage devices section are met. You must have at least three worker nodes with the same storage type and size attached to each node (for example, 200 GB) to use local storage devices on IBM Z or IBM(R) LinuxONE. Procedure In the OpenShift Web Console, click Operators Installed Operators to view all the installed operators. Ensure that the Project selected is openshift-storage . Click on the OpenShift Data Foundation operator and then click Create StorageSystem . In the Backing storage page, perform the following: Select the Create a new StorageClass using the local storage devices for Backing storage type option. Select Full Deployment for the Deployment type option. Click . Important You are prompted to install the Local Storage Operator if it is not already installed. Click Install , and follow the procedure as described in Installing Local Storage Operator . In the Create local volume set page, provide the following information: Enter a name for the LocalVolumeSet and the StorageClass . By default, the local volume set name appears for the storage class name. You can change the name. Choose one of the following: Disks on all nodes Uses the available disks that match the selected filters on all the nodes. Disks on selected nodes Uses the available disks that match the selected filters only on the selected nodes. Important The flexible scaling feature is enabled only when the storage cluster that you created with three or more nodes are spread across fewer than the minimum requirement of three availability zones. 
For information about flexible scaling, see knowledgebase article on Scaling OpenShift Data Foundation cluster using YAML when flexible scaling is enabled . Flexible scaling features get enabled at the time of deployment and can not be enabled or disabled later on. If the nodes selected do not match the OpenShift Data Foundation cluster requirement of an aggregated 30 CPUs and 72 GiB of RAM, a minimal cluster is deployed. For minimum starting node requirements, see the Resource requirements section in the Planning guide. From the available list of Disk Type , select SSD/NVME . Expand the Advanced section and set the following options: Volume Mode Block is selected by default. Device Type Select one or more device type from the dropdown list. Disk Size Set a minimum size of 100GB for the device and maximum available size of the device that needs to be included. Maximum Disks Limit This indicates the maximum number of PVs that can be created on a node. If this field is left empty, then PVs are created for all the available disks on the matching nodes. Click . A pop-up to confirm the creation of LocalVolumeSet is displayed. Click Yes to continue. In the Capacity and nodes page, configure the following: Available raw capacity is populated with the capacity value based on all the attached disks associated with the storage class. This takes some time to show up. The Selected nodes list shows the nodes based on the storage class. You can check the box to select Taint nodes. Click . Optional: In the Security and network page, configure the following based on your requirement: To enable encryption, select Enable data encryption for block and file storage . Choose one or both of the following Encryption level : Cluster-wide encryption Encrypts the entire cluster (block and file). StorageClass encryption Creates encrypted persistent volume (block only) using encryption enabled storage class. Select Connect to an external key management service checkbox. This is optional for cluster-wide encryption. Key Management Service Provider is set to Vault by default. Enter Vault Service Name , host Address of Vault server ('https:// <hostname or ip> ''), Port number and Token . Expand Advanced Settings to enter additional settings and certificate details based on your Vault configuration: Enter the Key Value secret path in Backend Path that is dedicated and unique to OpenShift Data Foundation. Optional: Enter TLS Server Name and Vault Enterprise Namespace . Upload the respective PEM encoded certificate file to provide CA Certificate , Client Certificate and Client Private Key . Click Save . Select Default (SDN) as Multus is not yet supported on OpenShift Data Foundation on IBM Z. Click . In the Review and create page:: Review the configuration details. To modify any configuration settings, click Back to go back to the configuration page. Click Create StorageSystem . Verification steps To verify the final Status of the installed storage cluster: In the OpenShift Web Console, navigate to Installed Operators OpenShift Data Foundation Storage System ocs-storagecluster-storagesystem Resources . Verify that Status of StorageCluster is Ready and has a green tick mark to it. To verify if flexible scaling is enabled on your storage cluster, perform the following steps: In the OpenShift Web Console, navigate to Installed Operators OpenShift Data Foundation Storage System ocs-storagecluster-storagesystem Resources ocs-storagecluster . 
In the YAML tab, search for the keys flexibleScaling in spec section and failureDomain in status section. If flexible scaling is true and failureDomain is set to host, flexible scaling feature is enabled. To verify that all components for OpenShift Data Foundation are successfully installed, see Verifying your OpenShift Data Foundation deployment . Additional resources To expand the capacity of the initial cluster, see the Scaling Storage guide.
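The procedure above assumes the worker nodes already carry the OpenShift Data Foundation label, and flexible scaling can also be checked from the CLI instead of the YAML tab; a minimal sketch, assuming the node names from the earlier example output and the default ocs-storagecluster name:
oc label nodes bmworker01 bmworker02 bmworker03 cluster.ocs.openshift.io/openshift-storage=''
oc -n openshift-storage get storagecluster ocs-storagecluster -o jsonpath='{.spec.flexibleScaling}{" "}{.status.failureDomain}{"\n"}'
The second command prints "true host" when flexible scaling is enabled and the failure domain is set to host, matching the YAML check described above.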
[ "oc annotate namespace openshift-storage openshift.io/node-selector=", "oc get nodes -l=cluster.ocs.openshift.io/openshift-storage=", "NAME STATUS ROLES AGE VERSION bmworker01 Ready worker 6h45m v1.16.2 bmworker02 Ready worker 6h45m v1.16.2 bmworker03 Ready worker 6h45m v1.16.2", "oc debug node/<node name>", "oc debug node/bmworker01 Starting pod/bmworker01-debug To use host binaries, run `chroot /host` Pod IP: 10.0.135.71 If you don't see a command prompt, try pressing enter. sh-4.2# chroot /host sh-4.4# lsblk NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT loop0 7:0 0 500G 0 loop sda 8:0 0 120G 0 disk |-sda1 8:1 0 384M 0 part /boot `-sda4 8:4 0 119.6G 0 part `-coreos-luks-root-nocrypt 253:0 0 119.6G 0 dm /sysroot sdb 8:16 0 500G 0 disk", "sh-4.4#ls -l /dev/disk/by-id/ | grep sdb lrwxrwxrwx. 1 root root 9 Feb 3 16:49 scsi-360050763808104bc2800000000000259 -> ../../sdb lrwxrwxrwx. 1 root root 9 Feb 3 16:49 scsi-SIBM_2145_00e020412f0aXX00 -> ../../sdb lrwxrwxrwx. 1 root root 9 Feb 3 16:49 scsi-0x60050763808104bc2800000000000259 -> ../../sdb", "scsi-0x60050763808104bc2800000000000259", "spec: flexibleScaling: true [...] status: failureDomain: host" ]
https://docs.redhat.com/en/documentation/red_hat_openshift_data_foundation/4.13/html/deploying_openshift_data_foundation_using_ibm_z/deploy-using-local-storage-devices-ibmz
probe::nfs.fop.release
probe::nfs.fop.release Name probe::nfs.fop.release - NFS client release page operation Synopsis nfs.fop.release Values: ino (inode number), dev (device identifier), mode (file mode)
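A minimal SystemTap sketch that uses this probe point and the values listed above (assumes the systemtap package and the kernel debuginfo matching the running kernel are installed):
stap -e 'probe nfs.fop.release { printf("nfs release: dev=%d ino=%d mode=%d\n", dev, ino, mode) }'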
null
https://docs.redhat.com/en/documentation/Red_Hat_Enterprise_Linux/7/html/systemtap_tapset_reference/api-nfs-fop-release
Chapter 38. Red Hat Enterprise Linux Atomic Host
Chapter 38. Red Hat Enterprise Linux Atomic Host dracut component, BZ#1160691 Red Hat Enterprise Linux Atomic Host 7.1.0 allows configuring encrypted root installation in the Anaconda installer, but the system will not boot afterwards. Choosing this option in the installer is not recommended. dracut component, BZ# 1189407 Red Hat Enterprise Linux Atomic Host 7.1.0 offers iSCSI support during Anaconda installation, but the current content set does not include iSCSI support, so the system will not be able to access the storage. Choosing this option in the installer is not recommended. kexec-tools component, BZ#1180703 Due to some parsing problems in the code, the kdump utility currently saves the kernel crash dumps in the /sysroot/crash/ directory instead of in /var/crash/. rhel-server-atomic component, BZ# 1186923 Red Hat Enterprise Linux Atomic Host 7.1.0 does not currently support systemtap, unless the host-kernel-matching packages which contain kernel-devel and other packages are installed into the rheltools container image. rhel-server-atomic component, BZ#1193704 Red Hat Enterprise Linux Atomic Host allocates 3GB of storage to the root partition, which includes the docker volumes. In order to support more volume space, more physical storage must be added to the system, or the root Logical Volume must be extended. The Managing Storage with Red Hat Enterprise Linux Atomic Host section from the Getting Started with Red Hat Enterprise Linux Atomic Host article describes the workaround methods for this issue. rhel-server-atomic component, BZ# 1186922 If the ltrace command is executed inside a Super-Privileged Container (SPC) to trace a process that is running on Red Hat Enterprise Linux Atomic Host, the ltrace command is unable to locate the binary images of the shared libraries that are attached to the process to be traced. As a consequence, ltrace displays a series of error messages, similar to the following example: rhel-server-atomic component, BZ#1187119 Red Hat Enterprise Linux Atomic Host does not include a mechanism to customize or override the content of the host itself, for example, it does not include a tool to use a custom kernel for debugging.
[ "Can't open /lib64/libwrap.so.0: No such file or directory Couldn't determine base address of /lib64/libwrap.so.0 ltrace: ltrace-elf.c:426: ltelf_destroy: Assertion `(&lte->plt_relocs)->elt_size == sizeof(GElf_Rela)' failed." ]
https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/7/html/7.1_release_notes/known-issues-atomic
Chapter 3. Manually upgrading using the roxctl CLI
Chapter 3. Manually upgrading using the roxctl CLI You can upgrade to the latest version of Red Hat Advanced Cluster Security for Kubernetes (RHACS) from a supported older version. Important You need to perform the manual upgrade procedure only if you used the roxctl CLI to install RHACS. There are manual steps for each version upgrade that must be followed, for example, from version 3.74 to version 4.0, and from version 4.0 to version 4.1. Therefore, Red Hat recommends upgrading first from 3.74 to 4.0, then from 4.0 to 4.1, then 4.1 to 4.2, until the selected version is installed. For full functionality, Red Hat recommends upgrading to the most recent version. To upgrade RHACS to the latest version, perform the following steps: Backup the Central database Upgrade the roxctl CLI Upgrade the Central cluster Upgrade all secured clusters 3.1. Backing up the Central database You can back up the Central database and use that backup for rolling back from a failed upgrade or data restoration in the case of an infrastructure disaster. Prerequisites You must have an API token with read permission for all resources of Red Hat Advanced Cluster Security for Kubernetes. The Analyst system role has read permissions for all resources. You have installed the roxctl CLI. You have configured the ROX_API_TOKEN and the ROX_CENTRAL_ADDRESS environment variables. Procedure Run the backup command: USD roxctl -e "USDROX_CENTRAL_ADDRESS" central backup Additional resources Authenticating by using the roxctl CLI 3.2. Upgrading the roxctl CLI To upgrade the roxctl CLI to the latest version you must uninstall the existing version of roxctl CLI and then install the latest version of the roxctl CLI. 3.2.1. Uninstalling the roxctl CLI You can uninstall the roxctl CLI binary on Linux by using the following procedure. Procedure Find and delete the roxctl binary: USD ROXPATH=USD(which roxctl) && rm -f USDROXPATH 1 1 Depending on your environment, you might need administrator rights to delete the roxctl binary. 3.2.2. Installing the roxctl CLI on Linux You can install the roxctl CLI binary on Linux by using the following procedure. Note roxctl CLI for Linux is available for amd64 , arm64 , ppc64le , and s390x architectures. Procedure Determine the roxctl architecture for the target operating system: USD arch="USD(uname -m | sed "s/x86_64//")"; arch="USD{arch:+-USDarch}" Download the roxctl CLI: USD curl -L -f -o roxctl "https://mirror.openshift.com/pub/rhacs/assets/4.5.6/bin/Linux/roxctlUSD{arch}" Make the roxctl binary executable: USD chmod +x roxctl Place the roxctl binary in a directory that is on your PATH : To check your PATH , execute the following command: USD echo USDPATH Verification Verify the roxctl version you have installed: USD roxctl version 3.2.3. Installing the roxctl CLI on macOS You can install the roxctl CLI binary on macOS by using the following procedure. Note roxctl CLI for macOS is available for amd64 and arm64 architectures. 
Procedure Determine the roxctl architecture for the target operating system: USD arch="USD(uname -m | sed "s/x86_64//")"; arch="USD{arch:+-USDarch}" Download the roxctl CLI: USD curl -L -f -o roxctl "https://mirror.openshift.com/pub/rhacs/assets/4.5.6/bin/Darwin/roxctlUSD{arch}" Remove all extended attributes from the binary: USD xattr -c roxctl Make the roxctl binary executable: USD chmod +x roxctl Place the roxctl binary in a directory that is on your PATH : To check your PATH , execute the following command: USD echo USDPATH Verification Verify the roxctl version you have installed: USD roxctl version 3.2.4. Installing the roxctl CLI on Windows You can install the roxctl CLI binary on Windows by using the following procedure. Note roxctl CLI for Windows is available for the amd64 architecture. Procedure Download the roxctl CLI: USD curl -f -O https://mirror.openshift.com/pub/rhacs/assets/4.5.6/bin/Windows/roxctl.exe Verification Verify the roxctl version you have installed: USD roxctl version 3.3. Upgrading the Central cluster After you have created a backup of the Central database and generated the necessary resources by using the provisioning bundle, the step is to upgrade the Central cluster. This process involves upgrading Central and Scanner. 3.3.1. Upgrading Central You can update Central to the latest version by downloading and deploying the updated images. Procedure Run the following command to update the Central image: USD oc -n stackrox set image deploy/central central=registry.redhat.io/advanced-cluster-security/rhacs-main-rhel8:4.5.6 1 1 If you use Kubernetes, enter kubectl instead of oc . Verification Verify that the new pods have deployed: USD oc get deploy -n stackrox -o wide USD oc get pod -n stackrox --watch 3.3.1.1. Editing the GOMEMLIMIT environment variable for the Central deployment Upgrading to version 4.4 requires that you manually replace the GOMEMLIMIT environment variable with the ROX_MEMLIMIT environment variable. You must edit this variable for each deployment. Procedure Run the following command to edit the variable for the Central deployment: USD oc -n stackrox edit deploy/central 1 1 If you use Kubernetes, enter kubectl instead of oc . Replace the GOMEMLIMIT variable with ROX_MEMLIMIT . Save the file. 3.3.2. Upgrading Scanner You can update Scanner to the latest version by downloading and deploying the updated images. Procedure Run the following command to update the Scanner image: USD oc -n stackrox set image deploy/scanner scanner=registry.redhat.io/advanced-cluster-security/rhacs-scanner-rhel8:4.5.6 1 1 If you use Kubernetes, enter kubectl instead of oc . Verification Verify that the new pods have deployed: USD oc get deploy -n stackrox -o wide USD oc get pod -n stackrox --watch 3.3.2.1. Editing the GOMEMLIMIT environment variable for the Scanner deployment Upgrading to version 4.4 requires that you manually replace the GOMEMLIMIT environment variable with the ROX_MEMLIMIT environment variable. You must edit this variable for each deployment. Procedure Run the following command to edit the variable for the Scanner deployment: USD oc -n stackrox edit deploy/scanner 1 1 If you use Kubernetes, enter kubectl instead of oc . Replace the GOMEMLIMIT variable with ROX_MEMLIMIT . Save the file. 3.3.3. Verifying the Central cluster upgrade After you have upgraded both Central and Scanner, verify that the Central cluster upgrade is complete. 
Procedure Check the Central logs by running the following command: USD oc logs -n stackrox deploy/central -c central 1 1 If you use Kubernetes, enter kubectl instead of oc . Sample output of a successful upgrade No database restore directory found (this is not an error). Migrator: 2023/04/19 17:58:54: starting DB compaction Migrator: 2023/04/19 17:58:54: Free fraction of 0.0391 (40960/1048576) is < 0.7500. Will not compact badger 2023/04/19 17:58:54 INFO: All 1 tables opened in 2ms badger 2023/04/19 17:58:55 INFO: Replaying file id: 0 at offset: 846357 badger 2023/04/19 17:58:55 INFO: Replay took: 50.324ms badger 2023/04/19 17:58:55 DEBUG: Value log discard stats empty Migrator: 2023/04/19 17:58:55: DB is up to date. Nothing to do here. badger 2023/04/19 17:58:55 INFO: Got compaction priority: {level:0 score:1.73 dropPrefix:[]} version: 2023/04/19 17:58:55.189866 ensure.go:49: Info: Version found in the DB was current. We're good to go! 3.4. Upgrading all secured clusters After upgrading Central services, you must upgrade all secured clusters. Important If you are using automatic upgrades: Update all your secured clusters by using automatic upgrades. For information about troubleshooting problems with the automatic cluster upgrader, see Troubleshooting the cluster upgrader . Skip the instructions in this section and follow the instructions in the Verify upgrades and Revoking the API token sections. If you are not using automatic upgrades, you must run the instructions in this section on all secured clusters including the Central cluster. To ensure optimal functionality, use the same RHACS version for your secured clusters and the cluster on which Central is installed. To complete manual upgrades of each secured cluster running Sensor, Collector, and Admission controller, follow the instructions in this section. 3.4.1. Updating other images You must update the sensor, collector and compliance images on each secured cluster when not using automatic upgrades. Note If you are using Kubernetes, use kubectl instead of oc for the commands listed in this procedure. Procedure Update the Sensor image: USD oc -n stackrox set image deploy/sensor sensor=registry.redhat.io/advanced-cluster-security/rhacs-main-rhel8:4.5.6 1 1 If you use Kubernetes, enter kubectl instead of oc . Update the Compliance image: USD oc -n stackrox set image ds/collector compliance=registry.redhat.io/advanced-cluster-security/rhacs-main-rhel8:4.5.6 1 1 If you use Kubernetes, enter kubectl instead of oc . Update the Collector image: USD oc -n stackrox set image ds/collector collector=registry.redhat.io/advanced-cluster-security/rhacs-collector-rhel8:4.5.6 1 1 If you use Kubernetes, enter kubectl instead of oc . Note If you are using the collector slim image, run the following command instead: USD oc -n stackrox set image ds/collector collector=registry.redhat.io/advanced-cluster-security/rhacs-collector-slim-rhel8:{rhacs-version} Update the admission control image: USD oc -n stackrox set image deploy/admission-control admission-control=registry.redhat.io/advanced-cluster-security/rhacs-main-rhel8:4.5.6 Important If you have installed RHACS on Red Hat OpenShift by using the roxctl CLI, you need to migrate the security context constraints (SCCs). For more information, see "Migrating SCCs during the manual upgrade" in the "Additional resources" section. steps Verifying secured cluster upgrade Additional resources Migrating SCCs during the manual upgrade 3.4.2. 
Migrating SCCs during the manual upgrade By migrating the security context constraints (SCCs) during the manual upgrade by using roxctl CLI, you can seamlessly transition the Red Hat Advanced Cluster Security for Kubernetes (RHACS) services to use the Red Hat OpenShift SCCs, ensuring compatibility and optimal security configurations across Central and all secured clusters. Procedure List all of the RHACS services that are deployed on Central and all secured clusters: USD oc -n stackrox describe pods | grep 'openshift.io/scc\|^Name:' Example output Name: admission-control-6f4dcc6b4c-2phwd openshift.io/scc: stackrox-admission-control #... Name: central-575487bfcb-sjdx8 openshift.io/scc: stackrox-central Name: central-db-7c7885bb-6bgbd openshift.io/scc: stackrox-central-db Name: collector-56nkr openshift.io/scc: stackrox-collector #... Name: scanner-68fc55b599-f2wm6 openshift.io/scc: stackrox-scanner Name: scanner-68fc55b599-fztlh #... Name: sensor-84545f86b7-xgdwf openshift.io/scc: stackrox-sensor #... In this example, you can see that each pod has its own custom SCC, which is specified through the openshift.io/scc field. Add the required roles and role bindings to use the Red Hat OpenShift SCCs instead of the RHACS custom SCCs. To add the required roles and role bindings to use the Red Hat OpenShift SCCs for the Central cluster, complete the following steps: Create a file named update-central.yaml that defines the role and role binding resources by using the following content: Example 3.1. Example YAML file apiVersion: rbac.authorization.k8s.io/v1 kind: Role 1 metadata: annotations: email: [email protected] owner: stackrox labels: app.kubernetes.io/component: central app.kubernetes.io/instance: stackrox-central-services app.kubernetes.io/name: stackrox app.kubernetes.io/part-of: stackrox-central-services app.kubernetes.io/version: 4.4.0 name: use-central-db-scc 2 namespace: stackrox 3 Rules: 4 - apiGroups: - security.openshift.io resourceNames: - nonroot-v2 resources: - securitycontextconstraints verbs: - use - - - apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: annotations: email: [email protected] owner: stackrox labels: app.kubernetes.io/component: central app.kubernetes.io/instance: stackrox-central-services app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: stackrox app.kubernetes.io/part-of: stackrox-central-services app.kubernetes.io/version: 4.4.0 name: use-central-scc namespace: stackrox rules: - apiGroups: - security.openshift.io resourceNames: - nonroot-v2 resources: - securitycontextconstraints verbs: - use - - - apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: annotations: email: [email protected] owner: stackrox labels: app.kubernetes.io/component: scanner app.kubernetes.io/instance: stackrox-central-services app.kubernetes.io/name: stackrox app.kubernetes.io/part-of: stackrox-central-services app.kubernetes.io/version: 4.4.0 name: use-scanner-scc namespace: stackrox rules: - apiGroups: - security.openshift.io resourceNames: - nonroot-v2 resources: - securitycontextconstraints verbs: - use - - - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding 5 metadata: annotations: email: [email protected] owner: stackrox labels: app.kubernetes.io/component: central app.kubernetes.io/instance: stackrox-central-services app.kubernetes.io/name: stackrox app.k ubernetes.io/part-of: stackrox-central-services app.kubernetes.io/version: 4.4.0 name: central-db-use-scc 6 namespace: stackrox roleRef: 7 apiGroup: rbac.authorization.k8s.io kind: Role name: 
use-central-db-scc subjects: 8 - kind: ServiceAccount name: central-db namespace: stackrox - - - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: annotations: email: [email protected] owner: stackrox labels: app.kubernetes.io/component: central app.kubernetes.io/instance: stackrox-central-services app.kubernetes.io/name: stackrox app.kubernetes.io/part-of: stackrox-central-services app.kubernetes.io/version: 4.4.0 name: central-use-scc namespace: stackrox roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: use-central-scc subjects: - kind: ServiceAccount name: central namespace: stackrox - - - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: annotations: email: [email protected] owner: stackrox labels: app.kubernetes.io/component: scanner app.kubernetes.io/instance: stackrox-central-services app.kubernetes.io/name: stackrox app.kubernetes.io/part-of: stackrox-central-services app.kubernetes.io/version: 4.4.0 name: scanner-use-scc namespace: stackrox roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: use-scanner-scc subjects: - kind: ServiceAccount name: scanner namespace: stackrox - - - 1 The type of Kubernetes resource, in this example, Role . 2 The name of the role resource. 3 The namespace in which the role is created. 4 Describes the permissions granted by the role resource. 5 The type of Kubernetes resource, in this example, RoleBinding . 6 The name of the role binding resource. 7 Specifies the role to bind in the same namespace. 8 Specifies the subjects that are bound to the role. Create the role and role binding resources specified in the update-central.yaml file by running the following command: USD oc -n stackrox create -f ./update-central.yaml To add the required roles and role bindings to use the Red Hat OpenShift SCCs for all secured clusters, complete the following steps: Create a file named upgrade-scs.yaml that defines the role and role binding resources by using the following content: Example 3.2. Example YAML file apiVersion: rbac.authorization.k8s.io/v1 kind: Role 1 metadata: annotations: email: [email protected] owner: stackrox labels: app.kubernetes.io/component: collector app.kubernetes.io/instance: stackrox-secured-cluster-services app.kubernetes.io/name: stackrox app.kubernetes.io/part-of: stackrox-secured-cluster-services app.kubernetes.io/version: 4.4.0 auto-upgrade.stackrox.io/component: sensor name: use-privileged-scc 2 namespace: stackrox 3 rules: 4 - apiGroups: - security.openshift.io resourceNames: - privileged resources: - securitycontextconstraints verbs: - use - - - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding 5 metadata: annotations: email: [email protected] owner: stackrox labels: app.kubernetes.io/component: collector app.kubernetes.io/instance: stackrox-secured-cluster-services app.kubernetes.io/name: stackrox app.kubernetes.io/part-of: stackrox-secured-cluster-services app.kubernetes.io/version: 4.4.0 auto-upgrade.stackrox.io/component: sensor name: collector-use-scc 6 namespace: stackrox roleRef: 7 apiGroup: rbac.authorization.k8s.io kind: Role name: use-privileged-scc subjects: 8 - kind: ServiceAccount name: collector namespace: stackrox - - - 1 The type of Kubernetes resource, in this example, Role . 2 The name of the role resource. 3 The namespace in which the role is created. 4 Describes the permissions granted by the role resource. 5 The type of Kubernetes resource, in this example, RoleBinding . 6 The name of the role binding resource. 
7 Specifies the role to bind in the same namespace. 8 Specifies the subjects that are bound to the role. Create the role and role binding resources specified in the upgrade-scs.yaml file by running the following command: USD oc -n stackrox create -f ./update-scs.yaml Important You must run this command on each secured cluster to create the role and role bindings specified in the upgrade-scs.yaml file. Delete the SCCs that are specific to RHACS: To delete the SCCs that are specific to the Central cluster, run the following command: USD oc delete scc/stackrox-central scc/stackrox-central-db scc/stackrox-scanner To delete the SCCs that are specific to all secured clusters, run the following command: USD oc delete scc/stackrox-admission-control scc/stackrox-collector scc/stackrox-sensor Important You must run this command on each secured cluster to delete the SCCs that are specific to each secured cluster. Verification Ensure that all the pods are using the correct SCCs by running the following command: USD oc -n stackrox describe pods | grep 'openshift.io/scc\|^Name:' Compare the output with the following table: Component custom SCC New Red Hat OpenShift 4 SCC Central stackrox-central nonroot-v2 Central-db stackrox-central-db nonroot-v2 Scanner stackrox-scanner nonroot-v2 Scanner-db stackrox-scanner nonroot-v2 Admission Controller stackrox-admission-control restricted-v2 Collector stackrox-collector privileged Sensor stackrox-sensor restricted-v2 3.4.2.1. Editing the GOMEMLIMIT environment variable for the Sensor deployment Upgrading to version 4.4 requires that you manually replace the GOMEMLIMIT environment variable with the ROX_MEMLIMIT environment variable. You must edit this variable for each deployment. Procedure Run the following command to edit the variable for the Sensor deployment: USD oc -n stackrox edit deploy/sensor 1 1 If you use Kubernetes, enter kubectl instead of oc . Replace the GOMEMLIMIT variable with ROX_MEMLIMIT . Save the file. 3.4.2.2. Editing the GOMEMLIMIT environment variable for the Collector deployment Upgrading to version 4.4 requires that you manually replace the GOMEMLIMIT environment variable with the ROX_MEMLIMIT environment variable. You must edit this variable for each deployment. Procedure Run the following command to edit the variable for the Collector deployment: USD oc -n stackrox edit deploy/collector 1 1 If you use Kubernetes, enter kubectl instead of oc . Replace the GOMEMLIMIT variable with ROX_MEMLIMIT . Save the file. 3.4.2.3. Editing the GOMEMLIMIT environment variable for the Admission Controller deployment Upgrading to version 4.4 requires that you manually replace the GOMEMLIMIT environment variable with the ROX_MEMLIMIT environment variable. You must edit this variable for each deployment. Procedure Run the following command to edit the variable for the Admission Controller deployment: USD oc -n stackrox edit deploy/admission-control 1 1 If you use Kubernetes, enter kubectl instead of oc . Replace the GOMEMLIMIT variable with ROX_MEMLIMIT . Save the file. 3.4.2.4. Verifying secured cluster upgrade After you have upgraded secured clusters, verify that the updated pods are working. Procedure Check that the new pods have deployed: USD oc get deploy,ds -n stackrox -o wide 1 1 If you use Kubernetes, enter kubectl instead of oc . USD oc get pod -n stackrox --watch 1 1 If you use Kubernetes, enter kubectl instead of oc . 3.5. 
Enabling RHCOS node scanning If you use OpenShift Container Platform, you can enable scanning of Red Hat Enterprise Linux CoreOS (RHCOS) nodes for vulnerabilities by using Red Hat Advanced Cluster Security for Kubernetes (RHACS). Prerequisites For scanning RHCOS node hosts of the Secured cluster, you must have installed Secured cluster on OpenShift Container Platform 4.11 or later. For information about supported platforms and architecture, see the Red Hat Advanced Cluster Security for Kubernetes Support Matrix . For life cycle support information for RHACS, see the Red Hat Advanced Cluster Security for Kubernetes Support Policy . Procedure Run one of the following commands to update the compliance container. For a default compliance container with metrics disabled, run the following command: USD oc -n stackrox patch daemonset/collector -p '{"spec":{"template":{"spec":{"containers":[{"name":"compliance","env":[{"name":"ROX_METRICS_PORT","value":"disabled"},{"name":"ROX_NODE_SCANNING_ENDPOINT","value":"127.0.0.1:8444"},{"name":"ROX_NODE_SCANNING_INTERVAL","value":"4h"},{"name":"ROX_NODE_SCANNING_INTERVAL_DEVIATION","value":"24m"},{"name":"ROX_NODE_SCANNING_MAX_INITIAL_WAIT","value":"5m"},{"name":"ROX_RHCOS_NODE_SCANNING","value":"true"},{"name":"ROX_CALL_NODE_INVENTORY_ENABLED","value":"true"}]}]}}}}' For a compliance container with Prometheus metrics enabled, run the following command: USD oc -n stackrox patch daemonset/collector -p '{"spec":{"template":{"spec":{"containers":[{"name":"compliance","env":[{"name":"ROX_METRICS_PORT","value":":9091"},{"name":"ROX_NODE_SCANNING_ENDPOINT","value":"127.0.0.1:8444"},{"name":"ROX_NODE_SCANNING_INTERVAL","value":"4h"},{"name":"ROX_NODE_SCANNING_INTERVAL_DEVIATION","value":"24m"},{"name":"ROX_NODE_SCANNING_MAX_INITIAL_WAIT","value":"5m"},{"name":"ROX_RHCOS_NODE_SCANNING","value":"true"},{"name":"ROX_CALL_NODE_INVENTORY_ENABLED","value":"true"}]}]}}}}' Update the Collector DaemonSet (DS) by taking the following steps: Add new volume mounts to Collector DS by running the following command: USD oc -n stackrox patch daemonset/collector -p '{"spec":{"template":{"spec":{"volumes":[{"name":"tmp-volume","emptyDir":{}},{"name":"cache-volume","emptyDir":{"sizeLimit":"200Mi"}}]}}}}' Add the new NodeScanner container by running the following command: USD oc -n stackrox patch daemonset/collector -p 
'{"spec":{"template":{"spec":{"containers":[{"command":["/scanner","--nodeinventory","--config=",""],"env":[{"name":"ROX_NODE_NAME","valueFrom":{"fieldRef":{"apiVersion":"v1","fieldPath":"spec.nodeName"}}},{"name":"ROX_CLAIR_V4_SCANNING","value":"true"},{"name":"ROX_COMPLIANCE_OPERATOR_INTEGRATION","value":"true"},{"name":"ROX_CSV_EXPORT","value":"false"},{"name":"ROX_DECLARATIVE_CONFIGURATION","value":"false"},{"name":"ROX_INTEGRATIONS_AS_CONFIG","value":"false"},{"name":"ROX_NETPOL_FIELDS","value":"true"},{"name":"ROX_NETWORK_DETECTION_BASELINE_SIMULATION","value":"true"},{"name":"ROX_NETWORK_GRAPH_PATTERNFLY","value":"true"},{"name":"ROX_NODE_SCANNING_CACHE_TIME","value":"3h36m"},{"name":"ROX_NODE_SCANNING_INITIAL_BACKOFF","value":"30s"},{"name":"ROX_NODE_SCANNING_MAX_BACKOFF","value":"5m"},{"name":"ROX_PROCESSES_LISTENING_ON_PORT","value":"false"},{"name":"ROX_QUAY_ROBOT_ACCOUNTS","value":"true"},{"name":"ROX_ROXCTL_NETPOL_GENERATE","value":"true"},{"name":"ROX_SOURCED_AUTOGENERATED_INTEGRATIONS","value":"false"},{"name":"ROX_SYSLOG_EXTRA_FIELDS","value":"true"},{"name":"ROX_SYSTEM_HEALTH_PF","value":"false"},{"name":"ROX_VULN_MGMT_WORKLOAD_CVES","value":"false"}],"image":"registry.redhat.io/advanced-cluster-security/rhacs-scanner-slim-rhel8:4.5.6","imagePullPolicy":"IfNotPresent","name":"node-inventory","ports":[{"containerPort":8444,"name":"grpc","protocol":"TCP"}],"volumeMounts":[{"mountPath":"/host","name":"host-root-ro","readOnly":true},{"mountPath":"/tmp/","name":"tmp-volume"},{"mountPath":"/cache","name":"cache-volume"}]}]}}}}' Additional resources Scanning RHCOS node hosts 3.6. Removing Central-attached PV after upgrading to version 4.1 and later Kubernetes and OpenShift Container Platform do not delete persistent volumes (PV) automatically. When you upgrade RHACS from earlier versions, the Central PV called stackrox-db remains mounted. However, in RHACS 4.1, Central does not need the previously attached PV anymore. The PV has data and persistent files used by earlier RHACS versions. You can use the PV to roll back to an earlier version before RHACS 4.1. Or, if you have a large RocksDB backup bundle for Central, you can use the PV to restore that data. After you complete the upgrade to 4.1, you can remove the Central-attached persistent volume claim (PVC) to free up the storage. Only remove the PVC if you do not plan to roll back or restore from earlier RocksDB backups. Warning After removing PVC, you cannot roll back Central to an earlier version before RHACS 4.1 or restore large RocksDB backups created with RocksDB. 3.6.1. Removing Central-attached PV using the roxctl CLI Remove the Central-attached persistent volume claim (PVC) stackrox-db to free up storage space. Procedure Run the following command: USD oc get deployment central -n stackrox -o json | jq '(.spec.template.spec.volumes[] | select(.name=="stackrox-db"))={"name": "stackrox-db", "emptyDir": {}}' | oc apply -f - It replaces the stackrox-db` entry in the spec.template.spec.volumes to a local emptyDir. Verification Run the following command: USD oc -n stackrox describe pvc stackrox-db | grep -i 'Used By' Used By: <none> 1 1 Wait until you see Used By: <none> . It might take a few minutes. 3.7. Rolling back Central You can roll back to a version of Central if the upgrade to a new version is unsuccessful. 3.7.1. Rolling back Central normally You can roll back to a version of Central if upgrading Red Hat Advanced Cluster Security for Kubernetes fails. 
Prerequisites Before you can perform a rollback, you must have free disk space available on your persistent storage. Red Hat Advanced Cluster Security for Kubernetes uses disk space to keep a copy of databases during the upgrade. If the disk space is not enough to store a copy and the upgrade fails, you might not be able to roll back to an earlier version. Procedure Run the following command to roll back to the previous version when an upgrade fails (before the Central service starts): USD oc -n stackrox rollout undo deploy/central 1 1 If you use Kubernetes, enter kubectl instead of oc . 3.7.2. Rolling back Central forcefully You can use forced rollback to roll back to an earlier version of Central (after the Central service starts). Important Using forced rollback to switch back to an earlier version might result in loss of data and functionality. Prerequisites Before you can perform a rollback, you must have free disk space available on your persistent storage. Red Hat Advanced Cluster Security for Kubernetes uses disk space to keep a copy of databases during the upgrade. If the disk space is not enough to store a copy and the upgrade fails, you will not be able to roll back to an earlier version. Procedure Run the following commands to perform a forced rollback: To forcefully roll back to the previously installed version: USD oc -n stackrox rollout undo deploy/central 1 1 If you use Kubernetes, enter kubectl instead of oc . To forcefully roll back to a specific version: Edit Central's ConfigMap : USD oc -n stackrox edit configmap/central-config 1 1 If you use Kubernetes, enter kubectl instead of oc . Update the value of the maintenance.forceRollbackVersion key: data: central-config.yaml: | maintenance: safeMode: false compaction: enabled: true bucketFillFraction: .5 freeFractionThreshold: 0.75 forceRollbackVersion: <x.x.x.x> 1 ... 1 Specify the version that you want to roll back to. Update the Central image version: USD oc -n stackrox \ 1 set image deploy/central central=registry.redhat.io/advanced-cluster-security/rhacs-main-rhel8:<x.x.x.x> 2 1 If you use Kubernetes, enter kubectl instead of oc . 2 Specify the version that you want to roll back to. It must be the same version that you specified for the maintenance.forceRollbackVersion key in the central-config config map. 3.8. Verifying upgrades The updated Sensors and Collectors continue to report the latest data from each secured cluster. The last time Sensor contacted Central is visible in the RHACS portal. Procedure In the RHACS portal, go to Platform Configuration System Health . Check that Sensor Upgrade shows clusters up to date with Central. 3.9. Revoking the API token For security reasons, Red Hat recommends that you revoke the API token that you used to complete the Central database backup. Prerequisites After the upgrade, you must reload the RHACS portal page and re-accept the certificate to continue using the RHACS portal. Procedure In the RHACS portal, go to Platform Configuration Integrations . Scroll down to the Authentication Tokens category, and click API Token . Select the checkbox in front of the token name that you want to revoke. Click Revoke . On the confirmation dialog box, click Confirm . 3.10. Troubleshooting the cluster upgrader If you encounter problems when you use the legacy installation method for the secured cluster with automated upgrades enabled, you can troubleshoot them as described in the following sections. The following errors can appear in the clusters view when the upgrader fails. 3.10.1.
Upgrader is missing permissions Symptom The following error is displayed in the cluster page: "Upgrader failed to execute PreflightStage of the roll-forward workflow: executing stage "Run preflight checks": preflight check "Kubernetes authorization" reported errors. This usually means that access is denied. Have you configured this Secured Cluster for automatically receiving upgrades?" Procedure Ensure that the bundle for the secured cluster was generated with future upgrades enabled before clicking Download YAML file and keys . If possible, remove that secured cluster and generate a new bundle, making sure that future upgrades are enabled. If you cannot re-create the cluster, you can take these actions: Ensure that the service account sensor-upgrader exists in the same namespace as Sensor. Ensure that a ClusterRoleBinding exists (default name: <namespace>:upgrade-sensors ) that grants the cluster-admin ClusterRole to the sensor-upgrader service account. 3.10.2. Upgrader cannot start due to missing image Symptom The following error is displayed in the cluster page: "Upgrade initialization error: The upgrader pods have trouble pulling the new image: Error pulling image: (...) (<image_reference:tag>: not found)" Procedure Ensure that the secured cluster can access the registry and pull the image <image_reference:tag> . Ensure that the image pull secrets are configured correctly in the secured cluster. 3.10.3. Upgrader cannot start due to an unknown reason Symptom The following error is displayed in the cluster page: "Upgrade initialization error: Pod terminated: (Error)" Procedure Ensure that the upgrader has enough permissions for accessing the cluster objects. For more information, see "Upgrader is missing permissions". Check the upgrader logs for more insights. 3.10.3.1. Obtaining upgrader logs You can access the upgrader logs by running the following command: USD kubectl -n <namespace> logs deploy/sensor-upgrader 1 1 For <namespace> , specify the namespace in which Sensor is running. Usually, the upgrader deployment runs in the cluster only for a short time while the upgrade is in progress. It is removed afterward, so accessing its logs with the orchestrator CLI requires proper timing.
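Because the upgrader deployment exists only briefly, a small polling loop can help you catch its logs before the deployment is removed. The following shell sketch is not part of the product tooling; the 5-second poll interval, the roughly 10-minute timeout, and the default stackrox namespace are assumptions that you can adjust.

#!/usr/bin/env bash
# Poll for the short-lived sensor-upgrader deployment and stream its logs.
# Assumption: you are logged in to the secured cluster with kubectl (or oc),
# and NAMESPACE is the namespace in which Sensor is running.
NAMESPACE="${1:-stackrox}"
for _ in $(seq 1 120); do                       # poll for up to ~10 minutes
  if kubectl -n "$NAMESPACE" get deploy/sensor-upgrader >/dev/null 2>&1; then
    kubectl -n "$NAMESPACE" logs -f deploy/sensor-upgrader   # follow until the upgrader pod exits
    exit 0
  fi
  sleep 5
done
echo "sensor-upgrader deployment did not appear in namespace $NAMESPACE" >&2
exit 1

If you use OpenShift Container Platform, you can replace kubectl with oc, as elsewhere in this document.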
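For the "Upgrader is missing permissions" case, you can also confirm the expected objects and access from the command line before regenerating the bundle. The following commands are a sketch that assumes Sensor runs in the stackrox namespace and that the default ClusterRoleBinding name stackrox:upgrade-sensors is in use; the final check is only an illustrative probe, not an exhaustive permission audit.

# Confirm that the sensor-upgrader service account exists in the Sensor namespace.
oc -n stackrox get serviceaccount sensor-upgrader

# Confirm that the ClusterRoleBinding (default name: <namespace>:upgrade-sensors)
# grants the cluster-admin ClusterRole to that service account.
oc get clusterrolebinding stackrox:upgrade-sensors -o yaml

# Spot-check that the service account can act cluster-wide, for example on deployments.
oc auth can-i update deployments --all-namespaces --as=system:serviceaccount:stackrox:sensor-upgrader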
[ "roxctl -e \"USDROX_CENTRAL_ADDRESS\" central backup", "ROXPATH=USD(which roxctl) && rm -f USDROXPATH 1", "arch=\"USD(uname -m | sed \"s/x86_64//\")\"; arch=\"USD{arch:+-USDarch}\"", "curl -L -f -o roxctl \"https://mirror.openshift.com/pub/rhacs/assets/4.5.6/bin/Linux/roxctlUSD{arch}\"", "chmod +x roxctl", "echo USDPATH", "roxctl version", "arch=\"USD(uname -m | sed \"s/x86_64//\")\"; arch=\"USD{arch:+-USDarch}\"", "curl -L -f -o roxctl \"https://mirror.openshift.com/pub/rhacs/assets/4.5.6/bin/Darwin/roxctlUSD{arch}\"", "xattr -c roxctl", "chmod +x roxctl", "echo USDPATH", "roxctl version", "curl -f -O https://mirror.openshift.com/pub/rhacs/assets/4.5.6/bin/Windows/roxctl.exe", "roxctl version", "oc -n stackrox set image deploy/central central=registry.redhat.io/advanced-cluster-security/rhacs-main-rhel8:4.5.6 1", "oc get deploy -n stackrox -o wide", "oc get pod -n stackrox --watch", "oc -n stackrox edit deploy/central 1", "oc -n stackrox set image deploy/scanner scanner=registry.redhat.io/advanced-cluster-security/rhacs-scanner-rhel8:4.5.6 1", "oc get deploy -n stackrox -o wide", "oc get pod -n stackrox --watch", "oc -n stackrox edit deploy/scanner 1", "oc logs -n stackrox deploy/central -c central 1", "No database restore directory found (this is not an error). Migrator: 2023/04/19 17:58:54: starting DB compaction Migrator: 2023/04/19 17:58:54: Free fraction of 0.0391 (40960/1048576) is < 0.7500. Will not compact badger 2023/04/19 17:58:54 INFO: All 1 tables opened in 2ms badger 2023/04/19 17:58:55 INFO: Replaying file id: 0 at offset: 846357 badger 2023/04/19 17:58:55 INFO: Replay took: 50.324ms badger 2023/04/19 17:58:55 DEBUG: Value log discard stats empty Migrator: 2023/04/19 17:58:55: DB is up to date. Nothing to do here. badger 2023/04/19 17:58:55 INFO: Got compaction priority: {level:0 score:1.73 dropPrefix:[]} version: 2023/04/19 17:58:55.189866 ensure.go:49: Info: Version found in the DB was current. 
We're good to go!", "oc -n stackrox set image deploy/sensor sensor=registry.redhat.io/advanced-cluster-security/rhacs-main-rhel8:4.5.6 1", "oc -n stackrox set image ds/collector compliance=registry.redhat.io/advanced-cluster-security/rhacs-main-rhel8:4.5.6 1", "oc -n stackrox set image ds/collector collector=registry.redhat.io/advanced-cluster-security/rhacs-collector-rhel8:4.5.6 1", "oc -n stackrox set image ds/collector collector=registry.redhat.io/advanced-cluster-security/rhacs-collector-slim-rhel8:{rhacs-version}", "oc -n stackrox set image deploy/admission-control admission-control=registry.redhat.io/advanced-cluster-security/rhacs-main-rhel8:4.5.6", "oc -n stackrox describe pods | grep 'openshift.io/scc\\|^Name:'", "Name: admission-control-6f4dcc6b4c-2phwd openshift.io/scc: stackrox-admission-control # Name: central-575487bfcb-sjdx8 openshift.io/scc: stackrox-central Name: central-db-7c7885bb-6bgbd openshift.io/scc: stackrox-central-db Name: collector-56nkr openshift.io/scc: stackrox-collector # Name: scanner-68fc55b599-f2wm6 openshift.io/scc: stackrox-scanner Name: scanner-68fc55b599-fztlh # Name: sensor-84545f86b7-xgdwf openshift.io/scc: stackrox-sensor #", "apiVersion: rbac.authorization.k8s.io/v1 kind: Role 1 metadata: annotations: email: [email protected] owner: stackrox labels: app.kubernetes.io/component: central app.kubernetes.io/instance: stackrox-central-services app.kubernetes.io/name: stackrox app.kubernetes.io/part-of: stackrox-central-services app.kubernetes.io/version: 4.4.0 name: use-central-db-scc 2 namespace: stackrox 3 Rules: 4 - apiGroups: - security.openshift.io resourceNames: - nonroot-v2 resources: - securitycontextconstraints verbs: - use - - - apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: annotations: email: [email protected] owner: stackrox labels: app.kubernetes.io/component: central app.kubernetes.io/instance: stackrox-central-services app.kubernetes.io/managed-by: Helm app.kubernetes.io/name: stackrox app.kubernetes.io/part-of: stackrox-central-services app.kubernetes.io/version: 4.4.0 name: use-central-scc namespace: stackrox rules: - apiGroups: - security.openshift.io resourceNames: - nonroot-v2 resources: - securitycontextconstraints verbs: - use - - - apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: annotations: email: [email protected] owner: stackrox labels: app.kubernetes.io/component: scanner app.kubernetes.io/instance: stackrox-central-services app.kubernetes.io/name: stackrox app.kubernetes.io/part-of: stackrox-central-services app.kubernetes.io/version: 4.4.0 name: use-scanner-scc namespace: stackrox rules: - apiGroups: - security.openshift.io resourceNames: - nonroot-v2 resources: - securitycontextconstraints verbs: - use - - - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding 5 metadata: annotations: email: [email protected] owner: stackrox labels: app.kubernetes.io/component: central app.kubernetes.io/instance: stackrox-central-services app.kubernetes.io/name: stackrox app.k ubernetes.io/part-of: stackrox-central-services app.kubernetes.io/version: 4.4.0 name: central-db-use-scc 6 namespace: stackrox roleRef: 7 apiGroup: rbac.authorization.k8s.io kind: Role name: use-central-db-scc subjects: 8 - kind: ServiceAccount name: central-db namespace: stackrox - - - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: annotations: email: [email protected] owner: stackrox labels: app.kubernetes.io/component: central app.kubernetes.io/instance: stackrox-central-services app.kubernetes.io/name: 
stackrox app.kubernetes.io/part-of: stackrox-central-services app.kubernetes.io/version: 4.4.0 name: central-use-scc namespace: stackrox roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: use-central-scc subjects: - kind: ServiceAccount name: central namespace: stackrox - - - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: annotations: email: [email protected] owner: stackrox labels: app.kubernetes.io/component: scanner app.kubernetes.io/instance: stackrox-central-services app.kubernetes.io/name: stackrox app.kubernetes.io/part-of: stackrox-central-services app.kubernetes.io/version: 4.4.0 name: scanner-use-scc namespace: stackrox roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: use-scanner-scc subjects: - kind: ServiceAccount name: scanner namespace: stackrox - - -", "oc -n stackrox create -f ./update-central.yaml", "apiVersion: rbac.authorization.k8s.io/v1 kind: Role 1 metadata: annotations: email: [email protected] owner: stackrox labels: app.kubernetes.io/component: collector app.kubernetes.io/instance: stackrox-secured-cluster-services app.kubernetes.io/name: stackrox app.kubernetes.io/part-of: stackrox-secured-cluster-services app.kubernetes.io/version: 4.4.0 auto-upgrade.stackrox.io/component: sensor name: use-privileged-scc 2 namespace: stackrox 3 rules: 4 - apiGroups: - security.openshift.io resourceNames: - privileged resources: - securitycontextconstraints verbs: - use - - - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding 5 metadata: annotations: email: [email protected] owner: stackrox labels: app.kubernetes.io/component: collector app.kubernetes.io/instance: stackrox-secured-cluster-services app.kubernetes.io/name: stackrox app.kubernetes.io/part-of: stackrox-secured-cluster-services app.kubernetes.io/version: 4.4.0 auto-upgrade.stackrox.io/component: sensor name: collector-use-scc 6 namespace: stackrox roleRef: 7 apiGroup: rbac.authorization.k8s.io kind: Role name: use-privileged-scc subjects: 8 - kind: ServiceAccount name: collector namespace: stackrox - - -", "oc -n stackrox create -f ./update-scs.yaml", "oc delete scc/stackrox-central scc/stackrox-central-db scc/stackrox-scanner", "oc delete scc/stackrox-admission-control scc/stackrox-collector scc/stackrox-sensor", "oc -n stackrox describe pods | grep 'openshift.io/scc\\|^Name:'", "oc -n stackrox edit deploy/sensor 1", "oc -n stackrox edit deploy/collector 1", "oc -n stackrox edit deploy/admission-control 1", "oc get deploy,ds -n stackrox -o wide 1", "oc get pod -n stackrox --watch 1", "oc -n stackrox patch daemonset/collector -p '{\"spec\":{\"template\":{\"spec\":{\"containers\":[{\"name\":\"compliance\",\"env\":[{\"name\":\"ROX_METRICS_PORT\",\"value\":\"disabled\"},{\"name\":\"ROX_NODE_SCANNING_ENDPOINT\",\"value\":\"127.0.0.1:8444\"},{\"name\":\"ROX_NODE_SCANNING_INTERVAL\",\"value\":\"4h\"},{\"name\":\"ROX_NODE_SCANNING_INTERVAL_DEVIATION\",\"value\":\"24m\"},{\"name\":\"ROX_NODE_SCANNING_MAX_INITIAL_WAIT\",\"value\":\"5m\"},{\"name\":\"ROX_RHCOS_NODE_SCANNING\",\"value\":\"true\"},{\"name\":\"ROX_CALL_NODE_INVENTORY_ENABLED\",\"value\":\"true\"}]}]}}}}'", "oc -n stackrox patch daemonset/collector -p 
'{\"spec\":{\"template\":{\"spec\":{\"containers\":[{\"name\":\"compliance\",\"env\":[{\"name\":\"ROX_METRICS_PORT\",\"value\":\":9091\"},{\"name\":\"ROX_NODE_SCANNING_ENDPOINT\",\"value\":\"127.0.0.1:8444\"},{\"name\":\"ROX_NODE_SCANNING_INTERVAL\",\"value\":\"4h\"},{\"name\":\"ROX_NODE_SCANNING_INTERVAL_DEVIATION\",\"value\":\"24m\"},{\"name\":\"ROX_NODE_SCANNING_MAX_INITIAL_WAIT\",\"value\":\"5m\"},{\"name\":\"ROX_RHCOS_NODE_SCANNING\",\"value\":\"true\"},{\"name\":\"ROX_CALL_NODE_INVENTORY_ENABLED\",\"value\":\"true\"}]}]}}}}'", "oc -n stackrox patch daemonset/collector -p '{\"spec\":{\"template\":{\"spec\":{\"volumes\":[{\"name\":\"tmp-volume\",\"emptyDir\":{}},{\"name\":\"cache-volume\",\"emptyDir\":{\"sizeLimit\":\"200Mi\"}}]}}}}'", "oc -n stackrox patch daemonset/collector -p '{\"spec\":{\"template\":{\"spec\":{\"containers\":[{\"command\":[\"/scanner\",\"--nodeinventory\",\"--config=\",\"\"],\"env\":[{\"name\":\"ROX_NODE_NAME\",\"valueFrom\":{\"fieldRef\":{\"apiVersion\":\"v1\",\"fieldPath\":\"spec.nodeName\"}}},{\"name\":\"ROX_CLAIR_V4_SCANNING\",\"value\":\"true\"},{\"name\":\"ROX_COMPLIANCE_OPERATOR_INTEGRATION\",\"value\":\"true\"},{\"name\":\"ROX_CSV_EXPORT\",\"value\":\"false\"},{\"name\":\"ROX_DECLARATIVE_CONFIGURATION\",\"value\":\"false\"},{\"name\":\"ROX_INTEGRATIONS_AS_CONFIG\",\"value\":\"false\"},{\"name\":\"ROX_NETPOL_FIELDS\",\"value\":\"true\"},{\"name\":\"ROX_NETWORK_DETECTION_BASELINE_SIMULATION\",\"value\":\"true\"},{\"name\":\"ROX_NETWORK_GRAPH_PATTERNFLY\",\"value\":\"true\"},{\"name\":\"ROX_NODE_SCANNING_CACHE_TIME\",\"value\":\"3h36m\"},{\"name\":\"ROX_NODE_SCANNING_INITIAL_BACKOFF\",\"value\":\"30s\"},{\"name\":\"ROX_NODE_SCANNING_MAX_BACKOFF\",\"value\":\"5m\"},{\"name\":\"ROX_PROCESSES_LISTENING_ON_PORT\",\"value\":\"false\"},{\"name\":\"ROX_QUAY_ROBOT_ACCOUNTS\",\"value\":\"true\"},{\"name\":\"ROX_ROXCTL_NETPOL_GENERATE\",\"value\":\"true\"},{\"name\":\"ROX_SOURCED_AUTOGENERATED_INTEGRATIONS\",\"value\":\"false\"},{\"name\":\"ROX_SYSLOG_EXTRA_FIELDS\",\"value\":\"true\"},{\"name\":\"ROX_SYSTEM_HEALTH_PF\",\"value\":\"false\"},{\"name\":\"ROX_VULN_MGMT_WORKLOAD_CVES\",\"value\":\"false\"}],\"image\":\"registry.redhat.io/advanced-cluster-security/rhacs-scanner-slim-rhel8:4.5.6\",\"imagePullPolicy\":\"IfNotPresent\",\"name\":\"node-inventory\",\"ports\":[{\"containerPort\":8444,\"name\":\"grpc\",\"protocol\":\"TCP\"}],\"volumeMounts\":[{\"mountPath\":\"/host\",\"name\":\"host-root-ro\",\"readOnly\":true},{\"mountPath\":\"/tmp/\",\"name\":\"tmp-volume\"},{\"mountPath\":\"/cache\",\"name\":\"cache-volume\"}]}]}}}}'", "oc get deployment central -n stackrox -o json | jq '(.spec.template.spec.volumes[] | select(.name==\"stackrox-db\"))={\"name\": \"stackrox-db\", \"emptyDir\": {}}' | oc apply -f -", "oc -n stackrox describe pvc stackrox-db | grep -i 'Used By' Used By: <none> 1", "oc -n stackrox rollout undo deploy/central 1", "oc -n stackrox rollout undo deploy/central 1", "oc -n stackrox edit configmap/central-config 1", "data: central-config.yaml: | maintenance: safeMode: false compaction: enabled: true bucketFillFraction: .5 freeFractionThreshold: 0.75 forceRollbackVersion: <x.x.x.x> 1", "oc -n stackrox \\ 1 set image deploy/central central=registry.redhat.io/advanced-cluster-security/rhacs-main-rhel8:<x.x.x.x> 2", "Upgrader failed to execute PreflightStage of the roll-forward workflow: executing stage \"Run preflight checks\": preflight check \"Kubernetes authorization\" reported errors. This usually means that access is denied. 
Have you configured this Secured Cluster for automatically receiving upgrades?\"", "\"Upgrade initialization error: The upgrader pods have trouble pulling the new image: Error pulling image: (...) (<image_reference:tag>: not found)\"", "\"Upgrade initialization error: Pod terminated: (Error)\"", "kubectl -n <namespace> logs deploy/sensor-upgrader 1" ]
https://docs.redhat.com/en/documentation/red_hat_advanced_cluster_security_for_kubernetes/4.5/html/upgrading/upgrade-roxctl