diff --git a/docs/cce/umn/ALL_META.TXT.json b/docs/cce/umn/ALL_META.TXT.json index b9d91c57..db107d38 100644 --- a/docs/cce/umn/ALL_META.TXT.json +++ b/docs/cce/umn/ALL_META.TXT.json @@ -45,7 +45,7 @@ "node_id":"cce_productdesc_0003.xml", "product_code":"cce", "code":"3", - "des":"CCE is a container service built on Docker and Kubernetes. A wealth of features enable you to run container clusters at scale. CCE eases containerization thanks to its re", + "des":"CCE is a container service developed on Docker and Kubernetes. It offers a wide range of features that allow you to run containers on a large scale. CCE containers are hi", "doc_type":"usermanual2", "kw":"Product Advantages,Service Overview,User Guide", "search_title":"", @@ -668,7 +668,7 @@ "node_id":"cce_bulletin_0058.xml", "product_code":"cce", "code":"37", - "des":"CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. This section describes the changes made in Kubernetes 1.25 compared wi", + "des":"This section describes the changes made in Kubernetes 1.25 compared with Kubernetes 1.23.New FeaturesDeprecations and RemovalsEnhanced Kubernetes 1.25 on CCEReferencesKub", "doc_type":"usermanual2", "kw":"Kubernetes 1.25 Release Notes,Kubernetes Version Release Notes,User Guide", "search_title":"", @@ -722,7 +722,7 @@ "node_id":"cce_whsnew_0010.xml", "product_code":"cce", "code":"40", - "des":"CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. This section describes the updates in CCE Kubernetes 1.19.Kubernetes v", + "des":"This section describes the updates in CCE Kubernetes 1.19.Kubernetes v1.19 Release NotesvSphere in-tree volumes can be migrated to vSphere CSI drivers. The in-tree vSpher", "doc_type":"usermanual2", "kw":"Kubernetes 1.19 (EOM) Release Notes,Kubernetes Version Release Notes,User Guide", "search_title":"", @@ -740,7 +740,7 @@ "node_id":"cce_whsnew_0007.xml", "product_code":"cce", "code":"41", - "des":"CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. This section describes the updates in CCE Kubernetes 1.17.All resource", + "des":"This section describes the updates in CCE Kubernetes 1.17.All resources in the apps/v1beta1 and apps/v1beta2 API versions are no longer served. Migrate to use the apps/v1", "doc_type":"usermanual2", "kw":"Kubernetes 1.17 (EOM) Release Notes,Kubernetes Version Release Notes,User Guide", "search_title":"", @@ -1370,7 +1370,7 @@ "node_id":"cce_10_0435.xml", "product_code":"cce", "code":"76", - "des":"Check whether CCE can connect to your master nodes.Contact technical support.", + "des":"Check whether your master nodes can be accessed using SSH.There is a low probability that the SSH connectivity check fails due to network fluctuations. 
Perform the pre-up", "doc_type":"usermanual2", "kw":"SSH Connectivity of Master Nodes,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", "search_title":"", @@ -1658,9 +1658,9 @@ "node_id":"cce_10_0452.xml", "product_code":"cce", "code":"92", - "des":"Check whether the number of CPUs on the master node is greater than 2.If the number of CPUs on the master node is 2, contact technical support to expand the number to 4 o", + "des":"Check and make sure that the master nodes in your cluster have more than 2 CPU cores.The number of CPU cores on the master nodes is 2, which may lead to a cluster upgrade", "doc_type":"usermanual2", - "kw":"Node CPUs,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "kw":"Node CPU Cores,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", "search_title":"", "metedata":[ { @@ -1668,7 +1668,7 @@ "documenttype":"usermanual" } ], - "title":"Node CPUs", + "title":"Node CPU Cores", "githuburl":"" }, { @@ -1748,9 +1748,9 @@ "node_id":"cce_10_0458.xml", "product_code":"cce", "code":"97", - "des":"Before the upgrade, check whether an internal error occurs.If this check fails, contact technical support.", + "des":"This check item is not typical and implies that an internal error was found during the pre-upgrade check.Perform the pre-upgrade check again.If it fails again, submit a s", "doc_type":"usermanual2", - "kw":"Internal Errors,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "kw":"Internal Error,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", "search_title":"", "metedata":[ { @@ -1758,7 +1758,7 @@ "documenttype":"usermanual" } ], - "title":"Internal Errors", + "title":"Internal Error", "githuburl":"" }, { @@ -1820,9 +1820,9 @@ "node_id":"cce_10_0479.xml", "product_code":"cce", "code":"101", - "des":"Check whether the current cce-controller-hpa add-on has compatibility restrictions.The current cce-controller-hpa add-on has compatibility restrictions. 
An add-on that ca", + "des":"Check whether there are compatibility limitations between the current and target cce-controller-hpa add-on versions.There are compatibility limitations between the curren", "doc_type":"usermanual2", - "kw":"cce-hpa-controller Restrictions,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "kw":"cce-hpa-controller Limitations,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", "search_title":"", "metedata":[ { @@ -1830,7 +1830,7 @@ "documenttype":"usermanual" } ], - "title":"cce-hpa-controller Restrictions", + "title":"cce-hpa-controller Limitations", "githuburl":"" }, { @@ -1856,7 +1856,7 @@ "node_id":"cce_10_0484.xml", "product_code":"cce", "code":"103", - "des":"Check whether the container runtime and network components on the worker nodes are healthy.If a worker node component malfunctions, log in to the node to check the status", + "des":"Check whether the container runtime and network components on the worker nodes are healthy.Issue 1: CNI Agent is not active.If your cluster version is earlier than v1.17.", "doc_type":"usermanual2", "kw":"Health of Worker Node Components,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", "search_title":"", @@ -1874,7 +1874,7 @@ "node_id":"cce_10_0485.xml", "product_code":"cce", "code":"104", - "des":"Check whether the Kubernetes, container runtime, and network components of the master nodes are healthy.If a master node component malfunctions, contact technical support", + "des":"Check whether cluster components such as the Kubernetes component, container runtime component, and network component are running properly before the upgrade.Perform the ", "doc_type":"usermanual2", "kw":"Health of Master Node Components,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", "search_title":"", @@ -2162,7 +2162,7 @@ "node_id":"cce_10_0501.xml", "product_code":"cce", "code":"120", - "des":"Check whether the source version of the cluster is earlier than v1.11 and the target version is later than v1.23.If the source version of the cluster is earlier than v1.1", + "des":"Check the historical upgrade records of the cluster and confirm that the current version of the cluster meets the requirements for upgrading to the target version.Upgradi", "doc_type":"usermanual2", "kw":"Historical Upgrade Records,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", "search_title":"", @@ -2180,7 +2180,7 @@ "node_id":"cce_10_0502.xml", "product_code":"cce", "code":"121", - "des":"Check whether the CIDR block of the cluster management plane is the same as that configured on the backbone network.If the CIDR block of the cluster management plane is d", + "des":"Check whether the CIDR block of the cluster management plane is the same as that configured on the backbone network.The CIDR block of the management plane has been modifi", "doc_type":"usermanual2", "kw":"CIDR Block of the Cluster Management Plane,Troubleshooting for Pre-upgrade Check Exceptions,User Gui", "search_title":"", @@ -2270,7 +2270,7 @@ "node_id":"cce_10_0507.xml", "product_code":"cce", "code":"126", - "des":"Check whether swap has been enabled on cluster nodes.By default, swap is disabled on CCE nodes. Check the necessity of enabling swap manually and determine the impact of ", + "des":"Check whether swap has been enabled on CCE nodes.By default, swap is disabled on CCE nodes. 
Check the necessity of enabling swap manually and determine the impact of disa", "doc_type":"usermanual2", "kw":"Node Swap,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", "search_title":"", @@ -2283,12 +2283,30 @@ "title":"Node Swap", "githuburl":"" }, + { + "uri":"cce_10_0508.html", + "node_id":"cce_10_0508.xml", + "product_code":"cce", + "code":"127", + "des":"Check item 1: Check whether there is an Nginx Ingress route whose ingress type is not specified (kubernetes.io/ingress.class: nginx is not added to annotations) in the cl", + "doc_type":"usermanual2", + "kw":"nginx-ingress Upgrade,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", + "search_title":"", + "metedata":[ + { + "prodname":"cce", + "documenttype":"usermanual" + } + ], + "title":"nginx-ingress Upgrade", + "githuburl":"" + }, { "uri":"cce_10_0510.html", "node_id":"cce_10_0510.xml", "product_code":"cce", - "code":"127", - "des":"Check whether the service pods running on a containerd node are restarted when containerd is upgraded.Upgrade the cluster when the impact on services is controllable (for", + "code":"128", + "des":"Check whether the service pods running on a containerd node are restarted when containerd is upgraded.containerd on your node may need to be restarted. To minimize the im", "doc_type":"usermanual2", "kw":"containerd Pod Restart Risks,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", "search_title":"", @@ -2305,7 +2323,7 @@ "uri":"cce_10_0511.html", "node_id":"cce_10_0511.xml", "product_code":"cce", - "code":"128", + "code":"129", "des":"Check whether the configuration of the CCE AI Suite add-on in a cluster has been intrusively modified. If so, upgrading the cluster may fail.", "doc_type":"usermanual2", "kw":"Key GPU Add-on Parameters,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -2323,7 +2341,7 @@ "uri":"cce_10_0512.html", "node_id":"cce_10_0512.xml", "product_code":"cce", - "code":"129", + "code":"130", "des":"Check whether GPU service pods are rebuilt in a cluster when kubelet is restarted during the upgrade of the cluster.Upgrade the cluster when the impact on services is con", "doc_type":"usermanual2", "kw":"GPU Pod Rebuild Risks,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -2341,8 +2359,8 @@ "uri":"cce_10_0513.html", "node_id":"cce_10_0513.xml", "product_code":"cce", - "code":"130", - "des":"Check whether the access control of the ELB listener has been configured for the Service in the current cluster using annotations and whether the configurations are corre", + "code":"131", + "des":"Check whether ELB listener access control has been configured for the Services in the current cluster using annotations.If so, check whether their configurations are corr", "doc_type":"usermanual2", "kw":"ELB Listener Access Control,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", "search_title":"", @@ -2359,8 +2377,8 @@ "uri":"cce_10_0514.html", "node_id":"cce_10_0514.xml", "product_code":"cce", - "code":"131", - "des":"Check whether the flavor of the master nodes in the cluster is the same as the actual flavor of these nodes.Flavor inconsistency is typically due to a modification made o", + "code":"132", + "des":"Check whether the flavor of the master nodes in the cluster is the same as the actual flavor of these nodes.This issue is typically caused by modifications made to the ma", "doc_type":"usermanual2", "kw":"Master Node Flavor,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", "search_title":"", @@ -2377,8 
+2395,8 @@ "uri":"cce_10_0515.html", "node_id":"cce_10_0515.xml", "product_code":"cce", - "code":"132", - "des":"Check whether the number of available IP addresses in the cluster subnet supports rolling upgrade.If the number of IP addresses in the selected cluster subnet is insuffic", + "code":"133", + "des":"Check whether the number of available IP addresses in the cluster subnet supports rolling upgrade.Rolling upgrade is not supported if there are not enough IP addresses in", "doc_type":"usermanual2", "kw":"Subnet Quota of Master Nodes,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", "search_title":"", @@ -2395,8 +2413,8 @@ "uri":"cce_10_0516.html", "node_id":"cce_10_0516.xml", "product_code":"cce", - "code":"133", - "des":"Check whether an alarm is generated when a cluster is upgraded to v1.27 or later. Do not use Docker in clusters of versions later than 1.27.If the runtime on your node is", + "code":"134", + "des":"Check whether an alarm is generated when a cluster is upgraded to v1.27 or later. Do not use Docker in clusters of versions later than 1.27.If your node's runtime is not ", "doc_type":"usermanual2", "kw":"Node Runtime,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", "search_title":"", @@ -2413,8 +2431,8 @@ "uri":"cce_10_0517.html", "node_id":"cce_10_0517.xml", "product_code":"cce", - "code":"134", - "des":"Check whether an alarm is generated when a cluster is upgraded to v1.27 or later. Do not use Docker in clusters of versions later than 1.27.If the runtime on your node po", + "code":"135", + "des":"Check whether an alarm is generated when a cluster is upgraded to v1.27 or later. Do not use Docker in clusters of versions later than 1.27.If your node pool's runtime is", "doc_type":"usermanual2", "kw":"Node Pool Runtime,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", "search_title":"", @@ -2431,7 +2449,7 @@ "uri":"cce_10_0518.html", "node_id":"cce_10_0518.xml", "product_code":"cce", - "code":"135", + "code":"136", "des":"Check the number of images on your node. If there are more than 1000 images, it takes a long time for Docker to start, affecting the standard Docker output and functions ", "doc_type":"usermanual2", "kw":"Number of Node Images,Troubleshooting for Pre-upgrade Check Exceptions,User Guide", @@ -2449,7 +2467,7 @@ "uri":"cce_10_0183.html", "node_id":"cce_10_0183.xml", "product_code":"cce", - "code":"136", + "code":"137", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Nodes", @@ -2467,7 +2485,7 @@ "uri":"cce_10_0180.html", "node_id":"cce_10_0180.xml", "product_code":"cce", - "code":"137", + "code":"138", "des":"A container cluster consists of a set of worker machines, called nodes, that run containerized applications. A node can be a virtual machine (VM) or a physical machine (P", "doc_type":"usermanual2", "kw":"paas,user group,Node Overview,Nodes,User Guide", @@ -2485,7 +2503,7 @@ "uri":"cce_10_0462.html", "node_id":"cce_10_0462.xml", "product_code":"cce", - "code":"138", + "code":"139", "des":"Container engines, one of the most important components of Kubernetes, manage the lifecycle of images and containers. 
The kubelet interacts with a container runtime throu", "doc_type":"usermanual2", "kw":"Container Engines,Nodes,User Guide", @@ -2503,7 +2521,7 @@ "uri":"cce_10_0476.html", "node_id":"cce_10_0476.xml", "product_code":"cce", - "code":"139", + "code":"140", "des":"This section describes the mappings between released cluster versions and OS versions.", "doc_type":"usermanual2", "kw":"Node OSs,Nodes,User Guide", @@ -2521,7 +2539,7 @@ "uri":"cce_10_0363.html", "node_id":"cce_10_0363.xml", "product_code":"cce", - "code":"140", + "code":"141", "des":"At least one cluster has been created.A key pair has been created for identity authentication upon remote node login.The DNS configuration of a subnet where a node is loc", "doc_type":"usermanual2", "kw":"Creating a Node,Nodes,User Guide", @@ -2539,7 +2557,7 @@ "uri":"cce_10_0198.html", "node_id":"cce_10_0198.xml", "product_code":"cce", - "code":"141", + "code":"142", "des":"In CCE, you can create a node (Creating a Node) or add existing nodes (ECSs) to your cluster for management.When accepting an ECS, you can reset the ECS OS to a standard ", "doc_type":"usermanual2", "kw":"Accepting Nodes for Management,Nodes,User Guide", @@ -2557,7 +2575,7 @@ "uri":"cce_10_0185.html", "node_id":"cce_10_0185.xml", "product_code":"cce", - "code":"142", + "code":"143", "des":"If you use SSH to log in to a node (an ECS), ensure that the ECS already has an EIP (a public IP address).Only login to a running ECS is allowed.Only the user linux can l", "doc_type":"usermanual2", "kw":"Logging In to a Node,Nodes,User Guide", @@ -2575,7 +2593,7 @@ "uri":"cce_10_0672.html", "node_id":"cce_10_0672.xml", "product_code":"cce", - "code":"143", + "code":"144", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"node labels", @@ -2593,7 +2611,7 @@ "uri":"cce_10_0004.html", "node_id":"cce_10_0004.xml", "product_code":"cce", - "code":"144", + "code":"145", "des":"You can add different labels to nodes and define different attributes for labels. 
By using these node labels, you can quickly understand the characteristics of each node.", "doc_type":"usermanual2", "kw":"node labels,Inherent Label of a Node,Managing Node Labels,Management Nodes,User Guide", @@ -2611,7 +2629,7 @@ "uri":"cce_10_0352.html", "node_id":"cce_10_0352.xml", "product_code":"cce", - "code":"145", + "code":"146", "des":"Taints enable a node to repel specific pods to prevent these pods from being scheduled to the node.On the CCE console, you can also batch manage nodes' taints.Enter the k", "doc_type":"usermanual2", "kw":"NoSchedule,PreferNoSchedule,NoExecute,System Taints,Managing Node Taints,Management Nodes,User Guide", @@ -2629,7 +2647,7 @@ "uri":"cce_10_0003.html", "node_id":"cce_10_0003.xml", "product_code":"cce", - "code":"146", + "code":"147", "des":"You can reset a node to modify the node configuration, such as the node OS and login mode.Resetting a node will reinstall the node OS and the Kubernetes software on the n", "doc_type":"usermanual2", "kw":"reset a node,Resetting a Node,Management Nodes,User Guide", @@ -2647,7 +2665,7 @@ "uri":"cce_10_0338.html", "node_id":"cce_10_0338.xml", "product_code":"cce", - "code":"147", + "code":"148", "des":"Removing a node from a cluster will re-install the node OS and clear CCE components on the node.Removing a node will not delete the server corresponding to the node. You ", "doc_type":"usermanual2", "kw":"Removing a Node,Management Nodes,User Guide", @@ -2665,7 +2683,7 @@ "uri":"cce_10_0184.html", "node_id":"cce_10_0184.xml", "product_code":"cce", - "code":"148", + "code":"149", "des":"Each node in a cluster is a cloud server or physical machine. After a cluster node is created, you can change the cloud server name or specifications as required. Modifyi", "doc_type":"usermanual2", "kw":"synchronize the ECS,Synchronizing the Data of Cloud Servers,Management Nodes,User Guide", @@ -2683,7 +2701,7 @@ "uri":"cce_10_0605.html", "node_id":"cce_10_0605.xml", "product_code":"cce", - "code":"149", + "code":"150", "des":"After you enable nodal drainage on the console, CCE configures the node to be non-schedulable and securely evicts all pods that comply with Rules for Draining Nodes on th", "doc_type":"usermanual2", "kw":"nodal drainage,nodal drainage,Draining a Node,Management Nodes,User Guide", @@ -2701,7 +2719,7 @@ "uri":"cce_10_0186.html", "node_id":"cce_10_0186.xml", "product_code":"cce", - "code":"150", + "code":"151", "des":"You can delete a pay-per-use node that is not needed from the node list.Deleting or unsubscribing from a node in a CCE cluster will release the node and services running ", "doc_type":"usermanual2", "kw":"Deleting a Node,Management Nodes,User Guide", @@ -2719,7 +2737,7 @@ "uri":"cce_10_0036.html", "node_id":"cce_10_0036.xml", "product_code":"cce", - "code":"151", + "code":"152", "des":"When a node in the cluster is stopped, all services on that node will also be stopped, and the node will no longer be available for scheduling. Check if your services wil", "doc_type":"usermanual2", "kw":"Stopping a Node,Management Nodes,User Guide", @@ -2737,7 +2755,7 @@ "uri":"cce_10_0276.html", "node_id":"cce_10_0276.xml", "product_code":"cce", - "code":"152", + "code":"153", "des":"In a rolling upgrade, a new node is created, existing workloads are migrated to the new node, and then the old node is deleted. 
Figure 1 shows the migration process.The o", "doc_type":"usermanual2", "kw":"Performing Rolling Upgrade for Nodes,Management Nodes,User Guide", @@ -2755,7 +2773,7 @@ "uri":"cce_10_0704.html", "node_id":"cce_10_0704.xml", "product_code":"cce", - "code":"153", + "code":"154", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Node O&M", @@ -2773,7 +2791,7 @@ "uri":"cce_10_0178.html", "node_id":"cce_10_0178.xml", "product_code":"cce", - "code":"154", + "code":"155", "des":"Some node resources are used to run mandatory Kubernetes system components and resources to make the node as part of your cluster. Therefore, the total number of node res", "doc_type":"usermanual2", "kw":"total number of node resources,Node Resource Reservation Policy,Node O&M,User Guide", @@ -2791,7 +2809,7 @@ "uri":"cce_10_0341.html", "node_id":"cce_10_0341.xml", "product_code":"cce", - "code":"155", + "code":"156", "des":"This section describes how to allocate data disk space to nodes so that you can configure the data disk space accordingly.When creating a node, configure data disks for t", "doc_type":"usermanual2", "kw":"data disk space allocation,Container engine and container image space,container engine and container", @@ -2809,7 +2827,7 @@ "uri":"cce_10_0348.html", "node_id":"cce_10_0348.xml", "product_code":"cce", - "code":"156", + "code":"157", "des":"The maximum number of pods that can be created on a node is calculated based on the cluster type:When creating a cluster using a VPC network, you need to configure the nu", "doc_type":"usermanual2", "kw":"Maximum Number of Pods on a Node,maximum number of pods,Maximum Number of Pods That Can Be Created o", @@ -2827,7 +2845,7 @@ "uri":"cce_10_0883.html", "node_id":"cce_10_0883.xml", "product_code":"cce", - "code":"157", + "code":"158", "des":"To maintain the stability of nodes, CCE stores Kubernetes and container runtime components on separate data disks. Kubernetes uses the /mnt/paas/kubernetes directory, and", "doc_type":"usermanual2", "kw":"Differences Between CCE Node mountPath Configurations and Community Native Configurations,Node O&M,U", @@ -2845,7 +2863,7 @@ "uri":"cce_10_0601.html", "node_id":"cce_10_0601.xml", "product_code":"cce", - "code":"158", + "code":"159", "des":"Kubernetes has removed dockershim from v1.24 and does not support Docker by default. CCE is going to stop the support for Docker. Change the node container engine from Do", "doc_type":"usermanual2", "kw":"Migrating Nodes from Docker to containerd,Node O&M,User Guide", @@ -2863,7 +2881,7 @@ "uri":"cce_10_0659.html", "node_id":"cce_10_0659.xml", "product_code":"cce", - "code":"159", + "code":"160", "des":"The node fault detection function depends on the NPD add-on. The add-on instances run on nodes and monitor nodes. This section describes how to enable node fault detectio", "doc_type":"usermanual2", "kw":"Node Fault Detection,Check Items,Configuring Node Fault Detection Policies,Node O&M,User Guide", @@ -2881,7 +2899,7 @@ "uri":"cce_10_0035.html", "node_id":"cce_10_0035.xml", "product_code":"cce", - "code":"160", + "code":"161", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Node Pools", @@ -2899,7 +2917,7 @@ "uri":"cce_10_0081.html", "node_id":"cce_10_0081.xml", "product_code":"cce", - "code":"161", + "code":"162", "des":"CCE introduces node pools to help you better manage nodes in Kubernetes clusters. A node pool contains one node or a group of nodes with identical configuration in a clus", "doc_type":"usermanual2", "kw":"DefaultPool,DefaultPool,Deploying a Workload in a Specified Node Pool,Node Pool Overview,Node Pools,", @@ -2917,7 +2935,7 @@ "uri":"cce_10_0012.html", "node_id":"cce_10_0012.xml", "product_code":"cce", - "code":"162", + "code":"163", "des":"This section describes how to create a node pool and perform operations on the node pool. For details about how a node pool works, see Node Pool Overview.Basic SettingsCo", "doc_type":"usermanual2", "kw":"Creating a Node Pool,Node Pools,User Guide", @@ -2935,8 +2953,8 @@ "uri":"cce_10_0658.html", "node_id":"cce_10_0658.xml", "product_code":"cce", - "code":"163", - "des":"You can specify a specification in a node pool for scaling.The default node pool does not support scaling. Use Creating a Node to add a node.Number of Scaling Targets: Th", + "code":"164", + "des":"You can specify a specification in a node pool for scaling.The default node pool does not support scaling. Use Creating a Node to add a node.Add or reduce nodes for scali", "doc_type":"usermanual2", "kw":"Scaling a Node Pool,Node Pools,User Guide", "search_title":"", @@ -2953,7 +2971,7 @@ "uri":"cce_10_0222.html", "node_id":"cce_10_0222.xml", "product_code":"cce", - "code":"164", + "code":"165", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Managing a Node Pool", @@ -2971,7 +2989,7 @@ "uri":"cce_10_0653.html", "node_id":"cce_10_0653.xml", "product_code":"cce", - "code":"165", + "code":"166", "des":"Changes to the container engine, OS, or pre-/post-installation script in a node pool take effect only on new nodes. To synchronize the modification onto existing nodes, m", "doc_type":"usermanual2", "kw":"Updating a Node Pool,Managing a Node Pool,User Guide", @@ -2989,7 +3007,7 @@ "uri":"cce_10_0727.html", "node_id":"cce_10_0727.xml", "product_code":"cce", - "code":"166", + "code":"167", "des":"Auto Scaling (AS) enables elastic scaling of nodes in a node pool based on scaling policies. 
Without this function, you have to manually adjust the number of nodes in a n", "doc_type":"usermanual2", "kw":"Updating an AS Configuration,Managing a Node Pool,User Guide", @@ -3007,7 +3025,7 @@ "uri":"cce_10_0652.html", "node_id":"cce_10_0652.xml", "product_code":"cce", - "code":"167", + "code":"168", "des":"The default node pool does not support the following management operations.CCE allows you to highly customize Kubernetes parameter settings on core components in a cluste", "doc_type":"usermanual2", "kw":"Modifying Node Pool Configurations,Managing a Node Pool,User Guide", @@ -3025,7 +3043,7 @@ "uri":"cce_10_0886.html", "node_id":"cce_10_0886.xml", "product_code":"cce", - "code":"168", + "code":"169", "des":"If you want to add a newly created ECS to a node pool in a cluster, or remove a node from a node pool and add it to the node pool again, accept the node.When an ECS is ac", "doc_type":"usermanual2", "kw":"Accepting Nodes in a Node Pool,Managing a Node Pool,User Guide", @@ -3043,7 +3061,7 @@ "uri":"cce_10_0655.html", "node_id":"cce_10_0655.xml", "product_code":"cce", - "code":"169", + "code":"170", "des":"You can copy the configuration of an existing node pool on the CCE console to create new node pools.", "doc_type":"usermanual2", "kw":"Copying a Node Pool,Managing a Node Pool,User Guide", @@ -3061,7 +3079,7 @@ "uri":"cce_10_0654.html", "node_id":"cce_10_0654.xml", "product_code":"cce", - "code":"170", + "code":"171", "des":"After the configuration of a node pool is updated, some configurations cannot be automatically synchronized for existing nodes. You can manually synchronize configuration", "doc_type":"usermanual2", "kw":"Synchronizing Node Pools,Managing a Node Pool,User Guide", @@ -3079,7 +3097,7 @@ "uri":"cce_10_0660.html", "node_id":"cce_10_0660.xml", "product_code":"cce", - "code":"171", + "code":"172", "des":"After CCE releases a new OS image, if existing nodes cannot be automatically upgraded, you can manually upgrade them in batches.This section describes how to upgrade an O", "doc_type":"usermanual2", "kw":"Upgrading an OS,Managing a Node Pool,User Guide", @@ -3097,7 +3115,7 @@ "uri":"cce_10_0656.html", "node_id":"cce_10_0656.xml", "product_code":"cce", - "code":"172", + "code":"173", "des":"Nodes in a node pool can be migrated to the default node pool. Nodes in the default node pool or a custom node pool cannot be migrated to other custom node pools.The migr", "doc_type":"usermanual2", "kw":"Migrating a Node,Managing a Node Pool,User Guide", @@ -3115,7 +3133,7 @@ "uri":"cce_10_0657.html", "node_id":"cce_10_0657.xml", "product_code":"cce", - "code":"173", + "code":"174", "des":"Deleting a node pool will delete nodes in the pool. Pods on these nodes will be automatically migrated to available nodes in other node pools.Deleting a node pool will de", "doc_type":"usermanual2", "kw":"Deleting a Node Pool,Managing a Node Pool,User Guide", @@ -3133,7 +3151,7 @@ "uri":"cce_10_0046.html", "node_id":"cce_10_0046.xml", "product_code":"cce", - "code":"174", + "code":"175", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Deployments,StatefulSets,DaemonSets,jobs,cron jobs", @@ -3151,7 +3169,7 @@ "uri":"cce_10_0006.html", "node_id":"cce_10_0006.xml", "product_code":"cce", - "code":"175", + "code":"176", "des":"A workload is an application running on Kubernetes. No matter how many components are there in your workload, you can run it in a group of Kubernetes pods. A workload is ", "doc_type":"usermanual2", "kw":"Deployments,StatefulSets,DaemonSets,jobs,cron jobs,Overview,Workloads,User Guide", @@ -3169,7 +3187,7 @@ "uri":"cce_10_0673.html", "node_id":"cce_10_0673.xml", "product_code":"cce", - "code":"176", + "code":"177", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Creating a Workload", @@ -3187,7 +3205,7 @@ "uri":"cce_10_0047.html", "node_id":"cce_10_0047.xml", "product_code":"cce", - "code":"177", + "code":"178", "des":"Deployments are workloads (for example, Nginx) that do not store any data or status. You can create Deployments on the CCE console or by running kubectl commands.Before c", "doc_type":"usermanual2", "kw":"create a workload using kubectl,Creating a Deployment,Creating a Workload,User Guide", @@ -3205,7 +3223,7 @@ "uri":"cce_10_0048.html", "node_id":"cce_10_0048.xml", "product_code":"cce", - "code":"178", + "code":"179", "des":"StatefulSets are a type of workloads whose data or status is stored while they are running. For example, MySQL is a StatefulSet because it needs to store new data.A conta", "doc_type":"usermanual2", "kw":"Using kubectl,Creating a StatefulSet,Creating a Workload,User Guide", @@ -3223,7 +3241,7 @@ "uri":"cce_10_0216.html", "node_id":"cce_10_0216.xml", "product_code":"cce", - "code":"179", + "code":"180", "des":"CCE provides deployment and management capabilities for multiple types of containers and supports features of container workloads, including creation, configuration, moni", "doc_type":"usermanual2", "kw":"create a workload using kubectl,Creating a DaemonSet,Creating a Workload,User Guide", @@ -3241,7 +3259,7 @@ "uri":"cce_10_0150.html", "node_id":"cce_10_0150.xml", "product_code":"cce", - "code":"180", + "code":"181", "des":"Jobs are short-lived and run for a certain time to completion. They can be executed immediately after being deployed. It is completed after it exits normally (exit 0).A j", "doc_type":"usermanual2", "kw":"Creating a Job,Creating a Workload,User Guide", @@ -3259,7 +3277,7 @@ "uri":"cce_10_0151.html", "node_id":"cce_10_0151.xml", "product_code":"cce", - "code":"181", + "code":"182", "des":"A cron job runs on a repeating schedule. You can perform time synchronization for all active nodes at a fixed time point.A cron job runs periodically at the specified tim", "doc_type":"usermanual2", "kw":"time synchronization,Creating a Cron Job,Creating a Workload,User Guide", @@ -3277,7 +3295,7 @@ "uri":"cce_10_0130.html", "node_id":"cce_10_0130.xml", "product_code":"cce", - "code":"182", + "code":"183", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Configuring a Workload", @@ -3295,7 +3313,7 @@ "uri":"cce_10_0463.html", "node_id":"cce_10_0463.xml", "product_code":"cce", - "code":"183", + "code":"184", "des":"The most significant difference is that each Kata container (pod) runs on an independent micro-VM, has an independent OS kernel, and is securely isolated at the virtualiz", "doc_type":"usermanual2", "kw":"Secure Runtime and Common Runtime,Configuring a Workload,User Guide", @@ -3313,7 +3331,7 @@ "uri":"cce_10_0354.html", "node_id":"cce_10_0354.xml", "product_code":"cce", - "code":"184", + "code":"185", "des":"When creating a workload, you can configure containers to use the same time zone as the node. You can enable time zone synchronization when creating a workload.The time z", "doc_type":"usermanual2", "kw":"Configuring Time Zone Synchronization,Configuring a Workload,User Guide", @@ -3331,7 +3349,7 @@ "uri":"cce_10_0353.html", "node_id":"cce_10_0353.xml", "product_code":"cce", - "code":"185", + "code":"186", "des":"When a workload is created, the container image is pulled from the image repository to the node. The image is also pulled when the workload is restarted or upgraded.By de", "doc_type":"usermanual2", "kw":"Configuring an Image Pull Policy,Configuring a Workload,User Guide", @@ -3349,7 +3367,7 @@ "uri":"cce_10_0009.html", "node_id":"cce_10_0009.xml", "product_code":"cce", - "code":"186", + "code":"187", "des":"CCE allows you to create workloads using images pulled from third-party image repositories.Generally, a third-party image repository can be accessed only after authentica", "doc_type":"usermanual2", "kw":"Using Third-Party Images,Configuring a Workload,User Guide", @@ -3367,7 +3385,7 @@ "uri":"cce_10_0163.html", "node_id":"cce_10_0163.xml", "product_code":"cce", - "code":"187", + "code":"188", "des":"CCE allows you to set resource requirements and limits, such as CPU and RAM, for added containers during workload creation. Kubernetes also allows using YAML to set requi", "doc_type":"usermanual2", "kw":"ephemeral storage,Configuring Container Specifications,Configuring a Workload,User Guide", @@ -3385,7 +3403,7 @@ "uri":"cce_10_0105.html", "node_id":"cce_10_0105.xml", "product_code":"cce", - "code":"188", + "code":"189", "des":"CCE provides callback functions for the lifecycle management of containerized applications. For example, if you want a container to perform a certain operation before sto", "doc_type":"usermanual2", "kw":"Startup Command,Post-Start,Pre-Stop,Configuring Container Lifecycle Parameters,Configuring a Workloa", @@ -3403,7 +3421,7 @@ "uri":"cce_10_0112.html", "node_id":"cce_10_0112.xml", "product_code":"cce", - "code":"189", + "code":"190", "des":"Health check regularly checks the health status of containers during container running. If the health check function is not configured, a pod cannot detect application ex", "doc_type":"usermanual2", "kw":"Health check,HTTP request,TCP port,CLI,Configuring Container Health Check,Configuring a Workload,Use", @@ -3421,7 +3439,7 @@ "uri":"cce_10_0113.html", "node_id":"cce_10_0113.xml", "product_code":"cce", - "code":"190", + "code":"191", "des":"An environment variable is a variable whose value can affect the way a running container will behave. 
You can modify environment variables even after workloads are deploy", "doc_type":"usermanual2", "kw":"Configuring Environment Variables,Configuring a Workload,User Guide", @@ -3439,7 +3457,7 @@ "uri":"cce_10_0397.html", "node_id":"cce_10_0397.xml", "product_code":"cce", - "code":"191", + "code":"192", "des":"In actual applications, upgrade is a common operation. A Deployment, StatefulSet, or DaemonSet can easily support application upgrade.You can set different upgrade polici", "doc_type":"usermanual2", "kw":"Configuring Workload Upgrade Policies,Configuring a Workload,User Guide", @@ -3457,7 +3475,7 @@ "uri":"cce_10_0232.html", "node_id":"cce_10_0232.xml", "product_code":"cce", - "code":"192", + "code":"193", "des":"Kubernetes supports node affinity and pod affinity/anti-affinity. You can configure custom rules to achieve affinity and anti-affinity scheduling. For example, you can de", "doc_type":"usermanual2", "kw":"Scheduling Policies (Affinity/Anti-affinity),Configuring a Workload,User Guide", @@ -3475,7 +3493,7 @@ "uri":"cce_10_0728.html", "node_id":"cce_10_0728.xml", "product_code":"cce", - "code":"193", + "code":"194", "des":"Tolerations allow the scheduler to schedule pods to nodes with target taints. Tolerances work with node taints. Each node allows one or more taints. If no tolerance is co", "doc_type":"usermanual2", "kw":"Configuring Tolerance Policies,Configuring a Workload,User Guide", @@ -3493,7 +3511,7 @@ "uri":"cce_10_0386.html", "node_id":"cce_10_0386.xml", "product_code":"cce", - "code":"194", + "code":"195", "des":"CCE allows you to add annotations to a YAML file to realize some advanced pod functions. The following table describes the annotations you can add.When you create a workl", "doc_type":"usermanual2", "kw":"Configuring Labels and Annotations,Configuring a Workload,User Guide", @@ -3511,7 +3529,7 @@ "uri":"cce_10_00356.html", "node_id":"cce_10_00356.xml", "product_code":"cce", - "code":"195", + "code":"196", "des":"If you encounter unexpected problems when using a container, you can log in to the container to debug it.The example output is as follows:NAME ", "doc_type":"usermanual2", "kw":"Logging In to a Container,Workloads,User Guide", @@ -3529,7 +3547,7 @@ "uri":"cce_10_0007.html", "node_id":"cce_10_0007.xml", "product_code":"cce", - "code":"196", + "code":"197", "des":"After a workload is created, you can upgrade, monitor, roll back, or delete the workload, as well as edit its YAML file.Workload/Job managementOperationDescriptionMonitor", "doc_type":"usermanual2", "kw":"Managing Workloads,Workloads,User Guide", @@ -3547,7 +3565,7 @@ "uri":"cce_10_0833.html", "node_id":"cce_10_0833.xml", "product_code":"cce", - "code":"197", + "code":"198", "des":"Custom Resource Definition (CRD) is an extension of Kubernetes APIs. When default Kubernetes resources cannot meet service requirements, you can use CRDs to define new re", "doc_type":"usermanual2", "kw":"Managing Custom Resources,Workloads,User Guide", @@ -3565,7 +3583,7 @@ "uri":"cce_10_0465.html", "node_id":"cce_10_0465.xml", "product_code":"cce", - "code":"198", + "code":"199", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Pod Security", @@ -3583,7 +3601,7 @@ "uri":"cce_10_0275.html", "node_id":"cce_10_0275.xml", "product_code":"cce", - "code":"199", + "code":"200", "des":"A pod security policy (PSP) is a cluster-level resource that controls sensitive security aspects of the pod specification. The PodSecurityPolicy object in Kubernetes defi", "doc_type":"usermanual2", "kw":"Configuring a Pod Security Policy,Pod Security,User Guide", @@ -3601,7 +3619,7 @@ "uri":"cce_10_0466.html", "node_id":"cce_10_0466.xml", "product_code":"cce", - "code":"200", + "code":"201", "des":"Before using pod security admission, understand Kubernetes Pod Security Standards. These standards define different isolation levels for pods. They let you define how you", "doc_type":"usermanual2", "kw":"Configuring Pod Security Admission,Pod Security,User Guide", @@ -3619,7 +3637,7 @@ "uri":"cce_10_0674.html", "node_id":"cce_10_0674.xml", "product_code":"cce", - "code":"201", + "code":"202", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Scheduling", @@ -3637,7 +3655,7 @@ "uri":"cce_10_0702.html", "node_id":"cce_10_0702.xml", "product_code":"cce", - "code":"202", + "code":"203", "des":"CCE supports different types of resource scheduling and task scheduling, improving application performance and overall cluster resource utilization. This section describe", "doc_type":"usermanual2", "kw":"Overview,Scheduling,User Guide", @@ -3655,7 +3673,7 @@ "uri":"cce_10_0551.html", "node_id":"cce_10_0551.xml", "product_code":"cce", - "code":"203", + "code":"204", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"CPU Scheduling", @@ -3673,7 +3691,7 @@ "uri":"cce_10_0351.html", "node_id":"cce_10_0351.xml", "product_code":"cce", - "code":"204", + "code":"205", "des":"By default, kubelet uses CFS quotas to enforce pod CPU limits. When a node runs many CPU-bound pods, the workload can move to different CPU cores depending on whether the", "doc_type":"usermanual2", "kw":"CPU Policy,CPU Scheduling,User Guide", @@ -3691,7 +3709,7 @@ "uri":"cce_10_0552.html", "node_id":"cce_10_0552.xml", "product_code":"cce", - "code":"205", + "code":"206", "des":"Kubernetes provides two CPU policies: none and static.none: The CPU policy is disabled by default, indicating the existing scheduling behavior.static: The static CPU core", "doc_type":"usermanual2", "kw":"Enhanced CPU Policy,CPU Scheduling,User Guide", @@ -3709,7 +3727,7 @@ "uri":"cce_10_0720.html", "node_id":"cce_10_0720.xml", "product_code":"cce", - "code":"206", + "code":"207", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"GPU Scheduling", @@ -3727,7 +3745,7 @@ "uri":"cce_10_0345.html", "node_id":"cce_10_0345.xml", "product_code":"cce", - "code":"207", + "code":"208", "des":"You can use GPUs in CCE containers.A GPU node has been created. For details, see Creating a Node.The CCE AI Suite (NVIDIA GPU) add-on has been installed. During the insta", "doc_type":"usermanual2", "kw":"Default GPU Scheduling in Kubernetes,GPU Scheduling,User Guide", @@ -3745,7 +3763,7 @@ "uri":"cce_10_0423.html", "node_id":"cce_10_0423.xml", "product_code":"cce", - "code":"208", + "code":"209", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Volcano Scheduling", @@ -3763,7 +3781,7 @@ "uri":"cce_10_0721.html", "node_id":"cce_10_0721.xml", "product_code":"cce", - "code":"209", + "code":"210", "des":"Volcano is a Kubernetes-based batch processing platform that supports machine learning, deep learning, bioinformatics, genomics, and other big data applications. It provi", "doc_type":"usermanual2", "kw":"Overview,Volcano Scheduling,User Guide", @@ -3781,7 +3799,7 @@ "uri":"cce_10_0722.html", "node_id":"cce_10_0722.xml", "product_code":"cce", - "code":"210", + "code":"211", "des":"Volcano is a Kubernetes-based batch processing platform with high-performance general computing capabilities like task scheduling engine, heterogeneous chip management, a", "doc_type":"usermanual2", "kw":"Scheduling Workloads,Volcano Scheduling,User Guide", @@ -3799,7 +3817,7 @@ "uri":"cce_10_0768.html", "node_id":"cce_10_0768.xml", "product_code":"cce", - "code":"211", + "code":"212", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Resource Usage-based Scheduling", @@ -3817,7 +3835,7 @@ "uri":"cce_10_0773.html", "node_id":"cce_10_0773.xml", "product_code":"cce", - "code":"212", + "code":"213", "des":"Bin packing is an optimization algorithm that aims to properly allocate resources to each job and get the jobs done using the minimum amount of resources. After bin packi", "doc_type":"usermanual2", "kw":"Bin Packing,Resource Usage-based Scheduling,User Guide", @@ -3835,7 +3853,7 @@ "uri":"cce_10_0766.html", "node_id":"cce_10_0766.xml", "product_code":"cce", - "code":"213", + "code":"214", "des":"Scheduling in a cluster is the process of binding pending pods to nodes, and is performed by a component called kube-scheduler or Volcano Scheduler. The scheduler uses a ", "doc_type":"usermanual2", "kw":"Descheduling,Resource Usage-based Scheduling,User Guide", @@ -3853,7 +3871,7 @@ "uri":"cce_10_0767.html", "node_id":"cce_10_0767.xml", "product_code":"cce", - "code":"214", + "code":"215", "des":"In scenarios such as node pool replacement and rolling node upgrade, an old resource pool needs to be replaced with a new one. 
To prevent the node pool replacement from a", "doc_type":"usermanual2", "kw":"Node Pool Affinity,Resource Usage-based Scheduling,User Guide", @@ -3871,7 +3889,7 @@ "uri":"cce_10_0789.html", "node_id":"cce_10_0789.xml", "product_code":"cce", - "code":"215", + "code":"216", "des":"Volcano Scheduler offers CPU and memory load-aware scheduling for pods and preferentially schedules pods to the node with the lightest load to balance node loads. This pr", "doc_type":"usermanual2", "kw":"Load-aware Scheduling,Resource Usage-based Scheduling,User Guide", @@ -3889,7 +3907,7 @@ "uri":"cce_10_0813.html", "node_id":"cce_10_0813.xml", "product_code":"cce", - "code":"216", + "code":"217", "des":"Volcano scheduling involves node filtering and scoring, which is used to filter the nodes meeting scheduling conditions and score the filtered nodes to find the one with ", "doc_type":"usermanual2", "kw":"Configuration Cases for Resource Usage-based Scheduling,Resource Usage-based Scheduling,User Guide", @@ -3907,7 +3925,7 @@ "uri":"cce_10_0774.html", "node_id":"cce_10_0774.xml", "product_code":"cce", - "code":"217", + "code":"218", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Priority-based Scheduling", @@ -3925,7 +3943,7 @@ "uri":"cce_10_0775.html", "node_id":"cce_10_0775.xml", "product_code":"cce", - "code":"218", + "code":"219", "des":"A pod priority indicates the importance of a pod relative to other pods. Volcano supports pod PriorityClasses in Kubernetes. After PriorityClasses are configured, the sch", "doc_type":"usermanual2", "kw":"Priority-based Scheduling,Priority-based Scheduling,User Guide", @@ -3943,7 +3961,7 @@ "uri":"cce_10_0776.html", "node_id":"cce_10_0776.xml", "product_code":"cce", - "code":"219", + "code":"220", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"AI Performance-based Scheduling", @@ -3961,7 +3979,7 @@ "uri":"cce_10_0777.html", "node_id":"cce_10_0777.xml", "product_code":"cce", - "code":"220", + "code":"221", "des":"Dominant Resource Fairness (DRF) is a scheduling algorithm based on the dominant resource of a container group. DRF scheduling can be used to enhance the service throughp", "doc_type":"usermanual2", "kw":"DRF,AI Performance-based Scheduling,User Guide", @@ -3979,7 +3997,7 @@ "uri":"cce_10_0778.html", "node_id":"cce_10_0778.xml", "product_code":"cce", - "code":"221", + "code":"222", "des":"Gang scheduling is a scheduling algorithm that schedules correlated processes or threads to run simultaneously on different processors. 
It meets the scheduling requiremen", "doc_type":"usermanual2", "kw":"Gang,AI Performance-based Scheduling,User Guide", @@ -3997,7 +4015,7 @@ "uri":"cce_10_0425.html", "node_id":"cce_10_0425.xml", "product_code":"cce", - "code":"222", + "code":"223", "des":"When a node runs many CPU-bound pods, the workload can move to different CPU cores depending on whether the pod is throttled and which CPU cores are available at scheduli", "doc_type":"usermanual2", "kw":"NUMA Affinity Scheduling,Volcano Scheduling,User Guide", @@ -4015,7 +4033,7 @@ "uri":"cce_10_0709.html", "node_id":"cce_10_0709.xml", "product_code":"cce", - "code":"223", + "code":"224", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Cloud Native Hybrid Deployment", @@ -4033,7 +4051,7 @@ "uri":"cce_10_0384.html", "node_id":"cce_10_0384.xml", "product_code":"cce", - "code":"224", + "code":"225", "des":"Many services see surges in traffic. To ensure performance and stability, resources are often requested at the maximum needed. However, the surges may ebb very shortly an", "doc_type":"usermanual2", "kw":"Dynamic Resource Oversubscription,Cloud Native Hybrid Deployment,User Guide", @@ -4051,7 +4069,7 @@ "uri":"cce_10_0020.html", "node_id":"cce_10_0020.xml", "product_code":"cce", - "code":"225", + "code":"226", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Network", @@ -4069,7 +4087,7 @@ "uri":"cce_10_0010.html", "node_id":"cce_10_0010.xml", "product_code":"cce", - "code":"226", + "code":"227", "des":"You can learn about a cluster network from the following two aspects:What is a cluster network like? A cluster consists of multiple nodes, and pods (or containers) are ru", "doc_type":"usermanual2", "kw":"Overview,Network,User Guide", @@ -4087,7 +4105,7 @@ "uri":"cce_10_0280.html", "node_id":"cce_10_0280.xml", "product_code":"cce", - "code":"227", + "code":"228", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Container Network", @@ -4105,7 +4123,7 @@ "uri":"cce_10_0281.html", "node_id":"cce_10_0281.xml", "product_code":"cce", - "code":"228", + "code":"229", "des":"The container network assigns IP addresses to pods in a cluster and provides networking services. In CCE, you can select the following network models for your cluster:Clo", "doc_type":"usermanual2", "kw":"Overview,Container Network,User Guide", @@ -4123,7 +4141,7 @@ "uri":"cce_10_0678.html", "node_id":"cce_10_0678.xml", "product_code":"cce", - "code":"229", + "code":"230", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. 
The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Cloud Native Network 2.0 Settings", @@ -4141,7 +4159,7 @@ "uri":"cce_10_0284.html", "node_id":"cce_10_0284.xml", "product_code":"cce", - "code":"230", + "code":"231", "des":"Cloud Native 2.0 network model is a proprietary, next-generation container network model that combines the elastic network interfaces (ENIs) and supplementary network int", "doc_type":"usermanual2", "kw":"Cloud Native 2.0 Network Model,Cloud Native Network 2.0 Settings,User Guide", @@ -4159,7 +4177,7 @@ "uri":"cce_10_0906.html", "node_id":"cce_10_0906.xml", "product_code":"cce", - "code":"231", + "code":"232", "des":"If the pod subnet configured during CCE Turbo cluster creation cannot meet service expansion requirements, you can add a pod subnet for the cluster.This function is avail", "doc_type":"usermanual2", "kw":"Adding a Pod Subnet for a Cluster,Cloud Native Network 2.0 Settings,User Guide", @@ -4177,7 +4195,7 @@ "uri":"cce_10_0897.html", "node_id":"cce_10_0897.xml", "product_code":"cce", - "code":"232", + "code":"233", "des":"In Cloud Native 2.0 network mode, pods use ENIs or sub-ENIs of the VPC. You can configure a security group for a pod using a pod's annotation.Configure a security group i", "doc_type":"usermanual2", "kw":"Binding a Security Group to a Pod Using an Annotation,Cloud Native Network 2.0 Settings,User Guide", @@ -4195,7 +4213,7 @@ "uri":"cce_10_0288.html", "node_id":"cce_10_0288.xml", "product_code":"cce", - "code":"233", + "code":"234", "des":"In Cloud Native Network 2.0, pods use VPC ENIs or sub-ENIs for networking. You can directly bind security groups and EIPs to pods. To bind CCE pods with security groups, ", "doc_type":"usermanual2", "kw":"Binding a Security Group to a Workload Using a Security Group Policy,Cloud Native Network 2.0 Settin", @@ -4213,7 +4231,7 @@ "uri":"cce_10_0196.html", "node_id":"cce_10_0196.xml", "product_code":"cce", - "code":"234", + "code":"235", "des":"In a CCE Turbo cluster, you can configure subnets and security groups for containers by namespace or workload using NetworkAttachmentDefinition CRDs. If you want to confi", "doc_type":"usermanual2", "kw":"Binding a Subnet and Security Group to a Namespace or Workload Using a Container Network Configurati", @@ -4231,7 +4249,7 @@ "uri":"cce_10_0603.html", "node_id":"cce_10_0603.xml", "product_code":"cce", - "code":"235", + "code":"236", "des":"In Cloud Native Network 2.0, each pod is associated with an ENI, providing a static IP address to the StatefulSet pods (container ENI). This is a common practice in acces", "doc_type":"usermanual2", "kw":"Configuring a Static IP Address for a Pod,Cloud Native Network 2.0 Settings,User Guide", @@ -4249,7 +4267,7 @@ "uri":"cce_10_0734.html", "node_id":"cce_10_0734.xml", "product_code":"cce", - "code":"236", + "code":"237", "des":"In Cloud Native Network 2.0, pods use VPC ENIs or sub-ENIs for networking. 
You can directly bind EIPs to pods.To associate an EIP with a pod, simply set the value of the ", "doc_type":"usermanual2", "kw":"Configuring an EIP for a Pod,Cloud Native Network 2.0 Settings,User Guide", @@ -4267,7 +4285,7 @@ "uri":"cce_10_0651.html", "node_id":"cce_10_0651.xml", "product_code":"cce", - "code":"237", + "code":"238", "des":"In Cloud Native Network 2.0, static public IP addresses (EIPs) can be assigned to StatefulSets or pods created directly.You can configure a static EIP for a pod only in C", "doc_type":"usermanual2", "kw":"static EIPs,Configuring a Static EIP for a Pod,Cloud Native Network 2.0 Settings,User Guide", @@ -4285,7 +4303,7 @@ "uri":"cce_10_0604.html", "node_id":"cce_10_0604.xml", "product_code":"cce", - "code":"238", + "code":"239", "des":"By default, pods with IPv6 dual-stack ENIs can access only the IPv6 private network. To access the public network, configure shared bandwidth for such pods.Only CCE Turbo", "doc_type":"usermanual2", "kw":"Configuring Shared Bandwidth for a Pod with IPv6 Dual-Stack ENIs,Cloud Native Network 2.0 Settings,U", @@ -4303,7 +4321,7 @@ "uri":"cce_10_0904.html", "node_id":"cce_10_0904.xml", "product_code":"cce", - "code":"239", + "code":"240", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"VPC Network Settings", @@ -4321,7 +4339,7 @@ "uri":"cce_10_0283.html", "node_id":"cce_10_0283.xml", "product_code":"cce", - "code":"240", + "code":"241", "des":"The VPC network model seamlessly combines VPC routing with the underlying network, making it ideal for high-performance scenarios. However, the maximum number of nodes al", "doc_type":"usermanual2", "kw":"VPC Network Model,VPC Network Settings,User Guide", @@ -4339,7 +4357,7 @@ "uri":"cce_10_0680.html", "node_id":"cce_10_0680.xml", "product_code":"cce", - "code":"241", + "code":"242", "des":"If the container CIDR block configured during CCE cluster creation cannot meet service expansion requirements, you can add a container CIDR block for the cluster.This fun", "doc_type":"usermanual2", "kw":"Adding a Container CIDR Block for a Cluster,VPC Network Settings,User Guide", @@ -4357,7 +4375,7 @@ "uri":"cce_10_0677.html", "node_id":"cce_10_0677.xml", "product_code":"cce", - "code":"242", + "code":"243", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Tunnel Network Settings", @@ -4375,7 +4393,7 @@ "uri":"cce_10_0282.html", "node_id":"cce_10_0282.xml", "product_code":"cce", - "code":"243", + "code":"244", "des":"A container tunnel network creates a separate network plane for containers by using tunnel encapsulation on the host network plane. The container tunnel network of a CCE ", "doc_type":"usermanual2", "kw":"Tunnel Network Model,Tunnel Network Settings,User Guide", @@ -4393,7 +4411,7 @@ "uri":"cce_10_0059.html", "node_id":"cce_10_0059.xml", "product_code":"cce", - "code":"244", + "code":"245", "des":"Network policies are designed by Kubernetes to restrict pod access. It is equivalent to a firewall at the application layer to enhance network security. 
The capabilities ", "doc_type":"usermanual2", "kw":"Configuring Network Policies to Restrict Pod Access,Tunnel Network Settings,User Guide", @@ -4411,7 +4429,7 @@ "uri":"cce_10_0675.html", "node_id":"cce_10_0675.xml", "product_code":"cce", - "code":"245", + "code":"246", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Pod Network Settings", @@ -4429,7 +4447,7 @@ "uri":"cce_10_0402.html", "node_id":"cce_10_0402.xml", "product_code":"cce", - "code":"246", + "code":"247", "des":"Kubernetes allows pods to directly use the host/node network. When a pod is configured with hostNetwork: true, applications running in the pod can directly view the netwo", "doc_type":"usermanual2", "kw":"Configuring hostNetwork for Pods,Pod Network Settings,User Guide", @@ -4447,7 +4465,7 @@ "uri":"cce_10_0382.html", "node_id":"cce_10_0382.xml", "product_code":"cce", - "code":"247", + "code":"248", "des":"Bandwidth preemption occurs between different containers deployed on the same node, which may cause service jitter. You can configure QoS rate limiting for inter-pod acce", "doc_type":"usermanual2", "kw":"Configuring QoS for a Pod,Pod Network Settings,User Guide", @@ -4465,7 +4483,7 @@ "uri":"cce_10_0247.html", "node_id":"cce_10_0247.xml", "product_code":"cce", - "code":"248", + "code":"249", "des":"HUAWEI CLOUD Help Center presents technical documents to help you quickly get started with HUAWEI CLOUD services. The technical documents include Service Overview, Price Details, Purchase Guide, User Guide, API Reference, Best Practices, FAQs, and Videos.", "doc_type":"usermanual2", "kw":"Service", @@ -4483,7 +4501,7 @@ "uri":"cce_10_0249.html", "node_id":"cce_10_0249.xml", "product_code":"cce", - "code":"249", + "code":"250", "des":"After a pod is created, the following problems may occur if you directly access the pod:The pod can be deleted and recreated at any time by a controller such as a Deploym", "doc_type":"usermanual2", "kw":"Overview,Service,User Guide", @@ -4501,7 +4519,7 @@ "uri":"cce_10_0011.html", "node_id":"cce_10_0011.xml", "product_code":"cce", - "code":"250", + "code":"251", "des":"ClusterIP Services allow workloads in the same cluster to use their cluster-internal domain names to access each other.The cluster-internal domain name format is -

2024-08-30

+

2024-11-25

+ +

Update:

+

Updated the Specifications field in Creating a Node and Creating a Node Pool.

+ + +

2024-10-30

+ +

Add:

+

Added NGINX Ingress Controller.

+

Added Nginx Ingresses.

+

Added nginx-ingress Upgrade.

+

Update:

+ + + +

2024-08-30

Update:

diff --git a/docs/cce/umn/cce_10_0004.html b/docs/cce/umn/cce_10_0004.html index 5ddb441b..c95b7edb 100644 --- a/docs/cce/umn/cce_10_0004.html +++ b/docs/cce/umn/cce_10_0004.html @@ -1,7 +1,8 @@

Managing Node Labels

-

Node Label Usage Scenario

Node labels are mainly used in the following scenarios:

+

You can add different labels to nodes and define different attributes for labels. By using these node labels, you can quickly understand the characteristics of each node.

+

Node Label Usage Scenario

Node labels are mainly used in the following scenarios:

  • Node management: Node labels are used to classify nodes.
  • Node affinity or anti-affinity for workloads: By adding labels to nodes, you can schedule pods to specific nodes through node affinity or prevent pods from being scheduled to specific nodes through node anti-affinity. For details, see Scheduling Policies (Affinity/Anti-affinity).
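For example, after labeling nodes you can steer pods to them with a nodeSelector or a node affinity rule. The following is a minimal sketch only; the label key and value (gpu: "true") and the workload name are illustrative, not values predefined by CCE.

apiVersion: apps/v1
kind: Deployment
metadata:
  name: gpu-app                  # illustrative workload name
spec:
  replicas: 1
  selector:
    matchLabels:
      app: gpu-app
  template:
    metadata:
      labels:
        app: gpu-app
    spec:
      nodeSelector:              # schedule pods only to nodes that carry this label
        gpu: "true"              # illustrative label added to the target nodes
      containers:
      - name: container-0
        image: nginx:alpine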

Inherent Label of a Node

After a node is created, some fixed labels exist and cannot be deleted. For details about these labels, see Table 1.

diff --git a/docs/cce/umn/cce_10_0006.html b/docs/cce/umn/cce_10_0006.html index d55f867c..0a4efc9c 100644 --- a/docs/cce/umn/cce_10_0006.html +++ b/docs/cce/umn/cce_10_0006.html @@ -1,7 +1,8 @@

Overview

-

CCE provides Kubernetes-native container deployment and management and supports lifecycle management of container workloads, including creation, configuration, monitoring, auto scaling, upgrade, uninstall, service discovery, and load balancing.

+

A workload is an application running on Kubernetes. No matter how many components there are in your workload, you can run it in a group of Kubernetes pods. A workload is an abstract model of a group of pods in Kubernetes. Workloads in Kubernetes are classified as Deployments, StatefulSets, DaemonSets, jobs, and cron jobs.

+

CCE provides Kubernetes-native container deployment and management and supports lifecycle management of container workloads, including creation, configuration, monitoring, auto scaling, upgrade, uninstall, service discovery, and load balancing.

Overview of Pod

A pod is the smallest and simplest unit in the Kubernetes object model that you create or deploy. A pod is a group of one or more containers, with shared storage and network resources, and a specification for how to run the containers. Each pod has a separate IP address.

Pods can be used in either of the following ways:

  • A pod runs only one container. This is the most common usage of pods in Kubernetes. You can consider a pod as a container, but Kubernetes directly manages pods instead of containers.
  • A pod runs multiple containers that need to be tightly coupled. In this scenario, a pod contains a main container and several sidecar containers, as shown in Figure 1. For example, the main container is a web server that provides file services from a fixed directory, and sidecar containers periodically download files to this fixed directory.
    Figure 1 Pod running multiple containers
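A minimal sketch of the second pattern, assuming an Nginx web server as the main container and a busybox sidecar that periodically refreshes the shared content directory (the image names, paths, and commands are illustrative):

apiVersion: v1
kind: Pod
metadata:
  name: web-with-sidecar               # illustrative pod name
spec:
  volumes:
  - name: shared-content               # directory shared by both containers
    emptyDir: {}
  containers:
  - name: web                          # main container that serves the files
    image: nginx:alpine
    volumeMounts:
    - name: shared-content
      mountPath: /usr/share/nginx/html
  - name: content-puller               # sidecar that periodically refreshes the files
    image: busybox
    command: ["sh", "-c", "while true; do date > /content/index.html; sleep 60; done"]
    volumeMounts:
    - name: shared-content
      mountPath: /content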
    diff --git a/docs/cce/umn/cce_10_0011.html b/docs/cce/umn/cce_10_0011.html index 89d12a2e..2d2735ed 100644 --- a/docs/cce/umn/cce_10_0011.html +++ b/docs/cce/umn/cce_10_0011.html @@ -6,7 +6,7 @@

    Figure 1 shows the mapping relationships between access channels, container ports, and access ports.

    Figure 1 Intra-cluster access (ClusterIP)
-

Creating a ClusterIP Service

  1. Log in to the CCE console and click the cluster name to access the cluster console.
  2. In the navigation pane, choose Services & Ingresses. In the upper right corner, click Create Service.
  3. Configure intra-cluster access parameters.

    • Service Name: Specify a Service name, which can be the same as the workload name.
    • Service Type: Select ClusterIP.
    • Namespace: namespace that the workload belongs to.
    • Selector: Add a label and click Confirm. The Service will use this label to select pods. You can also click Reference Workload Label to use the label of an existing workload. In the dialog box that is displayed, select a workload and click OK.
    • IPv6: This function is disabled by default. After this function is enabled, the cluster IP address of the Service changes to an IPv6 address. This parameter is available only in clusters of v1.15 or later with IPv6 enabled (set during cluster creation).
    • Ports
      • Protocol: protocol used by the Service.
      • Service Port: port used by the Service. The port number ranges from 1 to 65535.
      • Container Port: listener port of the workload. For example, Nginx uses port 80 by default.
      +

      Creating a ClusterIP Service

      1. Log in to the CCE console and click the cluster name to access the cluster console.
      2. In the navigation pane, choose Services & Ingresses. In the upper right corner, click Create Service.
      3. Configure intra-cluster access parameters.

        • Service Name: Specify a Service name, which can be the same as the workload name.
        • Service Type: Select ClusterIP.
        • Namespace: namespace that the workload belongs to.
        • Selector: Add a label and click Confirm. The Service will use this label to select pods. You can also click Reference Workload Label to use the label of an existing workload. In the dialog box that is displayed, select a workload and click OK.
        • IPv6: This function is disabled by default. After this function is enabled, the cluster IP address of the Service changes to an IPv6 address. This parameter is available only in clusters of v1.15 or later with IPv6 enabled (set during cluster creation).
        • Ports
          • Protocol: protocol used by the Service.
          • Service Port: port used by the Service. The port number ranges from 1 to 65535.
          • Container Port: listener port of the workload. For example, Nginx uses port 80 by default.

      4. Click OK.
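The same Service can also be defined in a YAML manifest and created with kubectl. A minimal sketch, assuming a workload whose pods are labeled app: nginx and listen on container port 80 (the name, namespace, labels, and ports are illustrative):

apiVersion: v1
kind: Service
metadata:
  name: nginx-clusterip          # Service name, which can be the same as the workload name
  namespace: default
spec:
  type: ClusterIP
  selector:                      # label used to select the backend pods
    app: nginx
  ports:
  - protocol: TCP
    port: 8080                   # Service port (1 to 65535)
    targetPort: 80               # container (listener) port of the workload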
      diff --git a/docs/cce/umn/cce_10_0012.html b/docs/cce/umn/cce_10_0012.html index 164f3039..4b8b87c8 100644 --- a/docs/cce/umn/cce_10_0012.html +++ b/docs/cce/umn/cce_10_0012.html @@ -16,11 +16,6 @@

      Name of a node pool. By default, the name is in the format of Cluster name-nodepool-Random number. If you do not want to use the default name format, you can customize the name.

      -

      Expected Initial Nodes

      - -

Number of nodes to be created in this node pool. A maximum of 50 nodes can be created at a time.

      - -
@@ -43,7 +38,7 @@

Specifications

-
Select a node flavor based on service requirements. The available node flavors vary depending on regions or AZs. For details, see the CCE console.
NOTE:
  • If a node pool is configured with multiple node flavors, only the flavors (which can be located in different AZs) of the same node type are supported. For example, a node pool consisting of general computing-plus nodes supports only general computing-plus node flavors, but not the flavors of general computing nodes.
  • A maximum of 10 node flavors can be added to a node pool (the flavors in different AZs are counted separately). When adding a node flavor, you can choose multiple AZs, but you need to specify them.
  • Nodes in a newly created node pool are created using the default flavor. If the resources for the default flavor are insufficient, node creation will fail.
  • After a node pool is created, the flavors of existing nodes cannot be deleted.
+
Select a node flavor based on service requirements. The available node flavors vary depending on regions or AZs. For details, see the CCE console. For the supported node flavors, see Node Flavor Description.
NOTE:
  • If a node pool is configured with multiple node flavors, only the flavors (which can be located in different AZs) of the same node type are supported. For example, a node pool consisting of general computing-plus nodes supports only general computing-plus node flavors, but not the flavors of general computing nodes.
  • A maximum of 10 node flavors can be added to a node pool (the flavors in different AZs are counted separately). When adding a node flavor, you can choose multiple AZs, but you need to specify them.
  • Nodes in a newly created node pool are created using the default flavor. If the resources for the default flavor are insufficient, node creation will fail.
  • After a node pool is created, the flavors of existing nodes cannot be deleted.
@@ -95,10 +90,10 @@

Advanced Settings

Expand the area and configure the following parameters:

-
  • Data Disk Space Allocation: allocates space for container engines, images, and ephemeral storage for them to run properly. For details about how to allocate data disk space, see Data Disk Space Allocation.
  • Data Disk Encryption: Data disk encryption safeguards your data. Snapshots generated from encrypted disks and disks created using these snapshots automatically inherit the encryption setting. BMS nodes do not support data disk encryption that is available only in certain regions. For details, see the console.
    • Not encrypted is selected by default.
    • If you select Enabled (key) for Data Disk Encryption, choose an existing key. If no key is available, click View Key List and create a key. After the key is created, click the refresh icon next to the text box.
    • If you select Enabled (KMS key ID) for Data Disk Encryption, enter a KMS key (which can be shared by others) in the current region.
    +
    • Data Disk Space Allocation: allocates space for container engines, images, and ephemeral storage for them to run properly. For details about how to allocate data disk space, see Data Disk Space Allocation.
    • Data Disk Encryption: Data disk encryption safeguards your data. Snapshots generated from encrypted disks and disks created using these snapshots automatically inherit the encryption setting.
      • Not encrypted is selected by default.
      • If you select Enabled (key) for Data Disk Encryption, choose an existing key. If no key is available, click View Key List and create a key. After the key is created, click the refresh icon next to the text box.
      • If you select Enabled (KMS key ID) for Data Disk Encryption, enter a KMS key (which can be shared by others) in the current region.

    Adding data disks

    -

    A maximum of 16 data disks can be attached to an ECS and 10 to a BMS. By default, a raw disk is created without any processing. You can also click Expand and select any of the following options:

    +

    A maximum of 16 data disks can be attached to an ECS. By default, a raw disk is created without any processing. You can also click Expand and select any of the following options:

    • Default: By default, a raw disk is created without any processing.
    • Mount Disk: The data disk is attached to a specified directory.
    • Use as PV: applicable when there is a high performance requirement on PVs. The node.kubernetes.io/local-storage-persistent label is added to the node with PV configured. The value is linear or striped.
    • Use as ephemeral volume: applicable when there is a high performance requirement on emptyDir.
    NOTE:
    • Local PVs are supported only when the cluster version is v1.21.2-r0 or later and the Everest add-on version is 2.1.23 or later. Version 2.1.23 or later is recommended.
    • Local EVs are supported only when the cluster version is v1.21.2-r0 or later and the Everest add-on version is 1.2.29 or later.
    @@ -138,7 +133,7 @@

    Security group used by the nodes created in the node pool. A maximum of five security groups can be selected.

    When a cluster is created, a node security group named {Cluster name}-cce-node-{Random ID} is created and used by default.

    -

    Traffic needs to pass through certain ports in the node security group to ensure node communications. Ensure that you have enabled these ports if you select another security group.

    +

    Traffic needs to pass through certain ports in the node security group to ensure node communications. Ensure that you have enabled these ports if you select another security group. For details, see Configuring Cluster Security Group Rules.

    NOTE:

    After a node pool is created, its associated security group cannot be modified.

    diff --git a/docs/cce/umn/cce_10_0015.html b/docs/cce/umn/cce_10_0015.html index 61939768..bd0094c6 100644 --- a/docs/cce/umn/cce_10_0015.html +++ b/docs/cce/umn/cce_10_0015.html @@ -1,7 +1,8 @@

    Using a ConfigMap

    -
    +

    After a ConfigMap is created, it can be used in three workload scenarios: environment variables, command line parameters, and data volumes.

    +

    The following example shows how to use a ConfigMap.

    apiVersion: v1
     kind: ConfigMap
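As a minimal sketch of the environment variable scenario, the following defines a ConfigMap and a pod that reads one of its keys as an environment variable; all names and values are illustrative and are not taken from the example above.

apiVersion: v1
kind: ConfigMap
metadata:
  name: example-configmap          # illustrative ConfigMap name
data:
  database_url: "db.example.com"   # illustrative key-value pair
---
apiVersion: v1
kind: Pod
metadata:
  name: configmap-env-demo
spec:
  containers:
  - name: container-0
    image: nginx:alpine
    env:
    - name: DATABASE_URL           # environment variable exposed to the container
      valueFrom:
        configMapKeyRef:
          name: example-configmap  # ConfigMap to read from
          key: database_url        # key within the ConfigMap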
    diff --git a/docs/cce/umn/cce_10_0016.html b/docs/cce/umn/cce_10_0016.html
    index ef23fe95..2dfc1e40 100644
    --- a/docs/cce/umn/cce_10_0016.html
    +++ b/docs/cce/umn/cce_10_0016.html
    @@ -1,7 +1,8 @@
     
     
     

    Using a Secret

    -

    Do not perform any operation on the following secrets. For details, see Cluster Secrets.

    +

    After secrets are created, they can be mounted as data volumes or be exposed as environment variables to be used by a container in a pod.

    +

    Do not perform any operation on the following secrets. For details, see Cluster Secrets.

    • Do not operate secrets under kube-system.
    • Do not operate default-secret and paas.elb in any of the namespaces. The default-secret is used to pull the private image of SWR, and the paas.elb is used to connect the service in the namespace to the ELB service.
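As a sketch of the two usage patterns above, the following pod mounts a secret as a data volume and also exposes one of its keys as an environment variable. The secret name and key are illustrative and must belong to a secret you created yourself, not to the system secrets listed above.

apiVersion: v1
kind: Pod
metadata:
  name: secret-demo                 # illustrative pod name
spec:
  volumes:
  - name: secret-volume
    secret:
      secretName: example-secret    # illustrative secret created beforehand
  containers:
  - name: container-0
    image: nginx:alpine
    volumeMounts:
    - name: secret-volume
      mountPath: /etc/secret        # each key in the secret becomes a file in this directory
      readOnly: true
    env:
    - name: DB_PASSWORD             # environment variable sourced from the secret
      valueFrom:
        secretKeyRef:
          name: example-secret
          key: password             # illustrative key within the secret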
    diff --git a/docs/cce/umn/cce_10_0018.html b/docs/cce/umn/cce_10_0018.html index 1c7b087d..614dc8e8 100644 --- a/docs/cce/umn/cce_10_0018.html +++ b/docs/cce/umn/cce_10_0018.html @@ -155,8 +155,8 @@ spec:

    Extended host path

    Extended host paths contain pod IDs or container names to distinguish different containers into which the host path is mounted.

    -

    A level-3 directory is added to the original volume directory/subdirectory. You can easily obtain the files output by a single Pod.

    -
    • None: No extended path is configured.
    • PodUID: ID of a pod.
    • PodName: name of a pod.
    • PodUID/ContainerName: ID of a pod or name of a container.
    • PodName/ContainerName: name of a pod or container.
    +

    A level-3 directory is added to the original volume directory/subdirectory. You can easily obtain the files output by a single Pod.

    +
    • None: No extended path is configured.
    • PodUID: ID of a pod.
    • PodName: name of a pod.
    • PodUID/ContainerName: ID of a pod or name of a container.
    • PodName/ContainerName: name of a pod or container.

    policy.logs.rotate

    @@ -164,7 +164,7 @@ spec:

    Log dump

    Log dump refers to rotating log files on a local host.

    -
    • Enabled: AOM scans log files every minute. When a log file exceeds 50 MB, it is dumped immediately. A new .zip file is generated in the directory where the log file locates. For a log file, AOM stores only the latest 20 .zip files. When the number of .zip files exceeds 20, earlier .zip files will be deleted. After the dump is complete, the log file in AOM will be cleared.
    • Disabled: AOM does not dump log files.
    +
    • Enabled: AOM scans log files every minute. When a log file exceeds 50 MB, it is dumped immediately. A new .zip file is generated in the directory where the log file locates. For a log file, AOM stores only the latest 20 .zip files. When the number of .zip files exceeds 20, earlier .zip files will be deleted. After the dump is complete, the log file in AOM will be cleared.
    • Disabled: AOM does not dump log files.
    NOTE:
    • AOM rotates log files using copytruncate. Before enabling log dumping, ensure that log files are written in the append mode. Otherwise, file holes may occur.
    • Currently, mainstream log components such as Log4j and Logback support log file rotation. If you have already set rotation for log files, skip the configuration. Otherwise, conflicts may occur.
    • You are advised to configure log file rotation for your own services to flexibly control the size and number of rolled files.
    @@ -174,7 +174,7 @@ spec:

    Collection path

    A collection path narrows down the scope of collection to specified logs.

    -
    • If no collection path is specified, log files in .log, .trace, and .out formats will be collected from the specified path.
    • /Path/**/ indicates that all log files in .log, .trace, and .out formats will be recursively collected from the specified path and all subdirectories at 5 levels deep.
    • * in log file names indicates a fuzzy match.
    +
    • If no collection path is specified, log files in .log, .trace, and .out formats will be collected from the specified path.
    • /Path/**/ indicates that all log files in .log, .trace, and .out formats will be recursively collected from the specified path and all subdirectories at 5 levels deep.
    • * in log file names indicates a fuzzy match.

    Example: The collection path /tmp/**/test*.log indicates that all .log files prefixed with test will be collected from /tmp and subdirectories at 5 levels deep.

    CAUTION:

    Ensure that ICAgent is of v5.12.22 or later.

    @@ -203,7 +203,7 @@ spec:

    Viewing Logs

    After a log collection path is configured and the workload is created, the ICAgent collects log files from the configured path. The collection takes about 1 minute.

    After the log collection is complete, go to the workload details page and click Logs in the upper right corner to view logs.

    You can also view logs on the AOM console.

    -

    You can also run the kubectl logs command to view the standard output of a container.

    +

    You can also run the kubectl logs command to view the container stdout.

    # View logs of a specified pod.
     kubectl logs <pod_name>
     kubectl logs -f <pod_name> # Similar to tail -f
    diff --git a/docs/cce/umn/cce_10_0024.html b/docs/cce/umn/cce_10_0024.html
    index bd11ca81..2e86f288 100644
    --- a/docs/cce/umn/cce_10_0024.html
    +++ b/docs/cce/umn/cce_10_0024.html
    @@ -5,8 +5,8 @@
     
    diff --git a/docs/cce/umn/cce_10_0025.html b/docs/cce/umn/cce_10_0025.html index 7dc3f0bc..801d5086 100644 --- a/docs/cce/umn/cce_10_0025.html +++ b/docs/cce/umn/cce_10_0025.html @@ -1,7 +1,8 @@

    CCE Operations Supported by Cloud Trace Service

    -
    +

    Cloud Trace Service (CTS) records operations on cloud service resources, allowing users to query, audit, and backtrack the resource operation requests initiated from the management console or open APIs as well as responses to the requests.

    +
    - @@ -263,7 +263,7 @@
    Table 1 CCE Operations Supported by CTS

    Operation

    Resource Type

    diff --git a/docs/cce/umn/cce_10_0026.html b/docs/cce/umn/cce_10_0026.html index 5d0636d9..89169934 100644 --- a/docs/cce/umn/cce_10_0026.html +++ b/docs/cce/umn/cce_10_0026.html @@ -1,20 +1,31 @@ -

    Querying Real-Time Traces

    -

    Scenarios

    After you enable CTS and the management tracker is created, CTS starts recording operations on cloud resources. CTS stores operation records generated in the last seven days.

    -

    This section describes how to query and export operation records of the last seven days on the CTS console.

    - +

    Viewing CTS Traces in the Trace List

    +

    Scenarios

    After you enable CTS and the management tracker is created, CTS starts recording operations on cloud resources. Cloud Trace Service (CTS) stores operation records (traces) generated in the last seven days.

    +

    These operation records are retained for seven days on the CTS console and are automatically deleted upon expiration. Manual deletion is not supported.

    +
    +

    This section describes how to query or export operation records of the last seven days on the CTS console.

    +
    -

    Viewing Real-Time Traces in the Trace List

    1. Log in to the management console.
    2. Click in the upper left corner and choose Management & Deployment > Cloud Trace Service. The CTS console is displayed.
    3. Choose Trace List in the navigation pane on the left.
    4. Set filters to search for your desired traces, as shown in Figure 1. The following filters are available:
      Figure 1 Filters
      +

      Constraints

      • You can only query operation records of the last seven days on the CTS console. To store operation records for longer than seven days, you must configure transfer to OBS or Log Tank Service (LTS) so that you can view them in OBS buckets or LTS log groups.
      • After performing operations on the cloud, you can query management traces on the CTS console one minute later.
      +
      +

      Viewing Real-Time Traces in the Trace List of the New Edition

      1. Log in to the management console.
      2. Click in the upper left corner and choose Management & Deployment > Cloud Trace Service. The CTS console is displayed.
      3. Choose Trace List in the navigation pane on the left.
      4. On the Trace List page, use advanced search to query traces. You can combine one or more filters.
        • Trace Name: Enter a trace name.
        • Trace ID: Enter a trace ID.
        • Resource Name: Enter a resource name. If the cloud resource involved in the trace does not have a resource name or the corresponding API operation does not involve the resource name parameter, leave this field empty.
        • Resource ID: Enter a resource ID. Leave this field empty if the resource has no resource ID or if resource creation failed.
        • Trace Source: Select a cloud service name from the drop-down list.
        • Resource Type: Select a resource type from the drop-down list.
        • Operator: Select one or more operators from the drop-down list.
        • Trace Status: Select normal, warning, or incident.
          • normal: The operation succeeded.
          • warning: The operation failed.
          • incident: The operation caused a fault that is more serious than the operation failure, for example, causing other faults.
          +
        • Time range: Select Last 1 hour, Last 1 day, or Last 1 week, or specify a custom time range within the last seven days.
        +
      5. On the Trace List page, you can also export and refresh the trace list, and customize columns to display.
        • Enter any keyword in the search box and press Enter to filter desired traces.
        • Click Export to export all traces in the query result as an .xlsx file. The file can contain up to 5,000 records.
        • Click to view the latest information about traces.
• Click to customize the information to be displayed in the trace list. If Auto wrapping is enabled, excess text will move down to the next line; otherwise, the text will be truncated. By default, this function is disabled.
        +
6. For details about key fields in the trace structure, see section "Trace References" > "Trace Structure" and section "Trace References" > "Example Traces".
      7. (Optional) On the Trace List page of the new edition, click Go to Old Edition in the upper right corner to switch to the Trace List page of the old edition.
      +
      +

      Viewing Real-Time Traces in the Trace List of the Old Edition

      1. Log in to the management console.
      2. Click in the upper left corner and choose Management & Deployment > Cloud Trace Service. The CTS console is displayed.
      3. Choose Trace List in the navigation pane on the left.
      4. Each time you log in to the CTS console, the new edition is displayed by default. Click Go to Old Edition in the upper right corner to switch to the trace list of the old edition.
      5. Set filters to search for your desired traces, as shown in Figure 1. The following filters are available.
        Figure 1 Filters
        • Trace Type, Trace Source, Resource Type, and Search By: Select a filter from the drop-down list.
          • If you select Resource ID for Search By, specify a resource ID.
          • If you select Trace name for Search By, specify a trace name.
          • If you select Resource name for Search By, specify a resource name.
          -
        • Operator: Select a user.
        • Trace Status: Select All trace statuses, Normal, Warning, or Incident.
        • Time range: You can query traces generated during any time range in the last seven days.
        • Click Export to export all traces in the query result as a CSV file. The file can contain up to 5000 records.
        +
      6. Operator: Select a user.
      7. Trace Status: Select All trace statuses, Normal, Warning, or Incident.
      8. Time range: Select Last 1 hour, Last 1 day, or Last 1 week, or specify a custom time range within the last seven days.
      9. Click Export to export all traces in the query result as a CSV file. The file can contain up to 5,000 records.
      -
    5. Click Query.
    6. On the Trace List page, you can also export and refresh the trace list.
      • Click Export to export all traces in the query result as a CSV file. The file can contain up to 5000 records.
      • Click to view the latest information about traces.
      +
    7. Click Query.
    8. On the Trace List page, you can also export and refresh the trace list.
      • Click Export to export all traces in the query result as a CSV file. The file can contain up to 5,000 records.
      • Click to view the latest information about traces.
    9. Click on the left of a trace to expand its details.

      -

      +

      +

      +

    10. Click View Trace in the Operation column. The trace details are displayed.

      -
    11. For details about key fields in the trace structure, see section "Trace References" > "Trace Structure" and section "Trace References" > "Example Traces" in the CTS User Guide.
    +
  • For details about key fields in the trace structure, see section "Trace References" > "Trace Structure" and section "Trace References" > "Example Traces" in the CTS User Guide.
  • (Optional) On the Trace List page of the old edition, click New Edition in the upper right corner to switch to the Trace List page of the new edition.
  • diff --git a/docs/cce/umn/cce_10_0028.html b/docs/cce/umn/cce_10_0028.html index be8ac7c6..f03625b6 100644 --- a/docs/cce/umn/cce_10_0028.html +++ b/docs/cce/umn/cce_10_0028.html @@ -42,7 +42,7 @@

    Select the number of master nodes. The master nodes are automatically hosted by CCE and deployed with Kubernetes cluster management components such as kube-apiserver, kube-controller-manager, and kube-scheduler.

    • 3 Masters: Three master nodes will be created for high cluster availability.
    • Single: Only one master node will be created in your cluster.
    -
    You can also select AZs for the master nodes. By default, AZs are allocated automatically for the master nodes.
    • Automatic: Master nodes are randomly distributed in different AZs for cluster DR. If the number of available AZs is less than the number of nodes to be created, CCE will create the nodes in the AZs with sufficient resources to preferentially ensure cluster creation. In this case, AZ-level DR may not be ensured.
    • Custom: Master nodes are deployed in specific AZs.
      If there is one master node in your cluster, you can select one AZ for the master node. If there are multiple master nodes in your cluster, you can select multiple AZs for the master nodes.
      • AZ: Master nodes are deployed in different AZs for cluster DR.
      • Host: Master nodes are deployed on different hosts in the same AZ for cluster DR.
      • Custom: Master nodes are deployed in the AZs you specified.
      +
      You can also select AZs for the master nodes. By default, AZs are allocated automatically for the master nodes.
      • Automatic: Master nodes are randomly distributed in different AZs for cluster DR. If there are not enough AZs available, CCE will prioritize assigning nodes in AZs with enough resources to ensure cluster creation. However, this may result in AZ-level DR not being guaranteed.
      • Custom: Master nodes are deployed in specific AZs.
        If there is one master node in your cluster, you can select one AZ for the master node. If there are multiple master nodes in your cluster, you can select multiple AZs for the master nodes.
        • AZ: Master nodes are deployed in different AZs for cluster DR.
        • Host: Master nodes are deployed on different hosts in the same AZ for cluster DR.
        • Custom: Master nodes are deployed in the AZs you specified.
      @@ -72,7 +72,7 @@

    Default Security Group

    Select the security group automatically generated by CCE or use the existing one as the default security group of the node.
    NOTICE:

    The default security group must allow traffic from certain ports to ensure normal communication. Otherwise, the node cannot be created.

    +
    Select the security group automatically generated by CCE or use the existing one as the default security group of the node.
    NOTICE:

    The default security group must allow traffic from certain ports to ensure normal communication. Otherwise, the node cannot be created. For details, see Configuring Cluster Security Group Rules.

    -
    Observability +
    Observability

    Add-on Name

    Description

    diff --git a/docs/cce/umn/cce_10_0034.html b/docs/cce/umn/cce_10_0034.html new file mode 100644 index 00000000..4a131af4 --- /dev/null +++ b/docs/cce/umn/cce_10_0034.html @@ -0,0 +1,191 @@ + + +

    NGINX Ingress Controller

    +

    Introduction

Kubernetes uses kube-proxy to expose Services and provide load balancing at the transport layer. For Internet-facing applications that generate large volumes of traffic, forwarding needs to be more fine-grained and controlled precisely and flexibly by policies and load balancers to deliver higher performance.

    +

    This is where ingresses enter. Ingresses provide application-layer forwarding functions, such as virtual hosts, load balancing, SSL proxy, and HTTP routing, for Services that can be directly accessed outside a cluster.

    +

Kubernetes has officially released an Nginx-based ingress controller. CCE's NGINX Ingress Controller directly uses the community templates and images. The controller generates the Nginx configuration, stores it in a ConfigMap, and writes it to the Nginx pods through the Kubernetes API, which is how the Nginx configuration is modified and updated. For details, see How the Add-on Works.

    +

    You can visit the open source community for more information.

    +
    • Starting from version 2.3.3, NGINX Ingress Controller only supports TLS v1.2 and v1.3 by default. If the TLS version is earlier than v1.2, an error will occur during the negotiation process between the client and Nginx Ingress. If more versions of TLS are needed, see TLS/HTTPS.
    • When installing the NGINX Ingress Controller, you can specify Nginx parameters. These parameters take effect globally and are contained in the nginx.conf file. You can search for the parameters in ConfigMaps. If the parameters are not included in ConfigMaps, the configurations will not take effect.
    • Do not manually modify or delete the load balancer and listener that are automatically created by CCE. Otherwise, the workload will be abnormal. If you have modified or deleted them by mistake, uninstall the nginx-ingress add-on and re-install it.
    +
    +
    +

    How the Add-on Works

    NGINX Ingress Controller consists of the ingress object, ingress controller, and Nginx. The ingress controller assembles ingresses into the Nginx configuration file (nginx.conf) and reloads Nginx to make the changed configurations take effect. When it detects that the pod in a Service changes, it dynamically changes the upstream server group configuration of Nginx. In this case, the Nginx process does not need to be reloaded. Figure 1 shows how this add-on works.

    +
• An ingress is a group of access rules that forward requests to specified Services based on domain names or URLs. Ingresses are stored in etcd, the cluster's key-value store, and are added, deleted, modified, and queried through APIs.
    • The ingress controller monitors the changes of resource objects such as ingresses, Services, endpoints, secrets (mainly TLS certificates and keys), nodes, and ConfigMaps in real time and automatically performs operations on Nginx.
    • Nginx implements load balancing and access control at the application layer.
    +
    Figure 1 Working principles of NGINX Ingress Controller
    +
    +

    Precautions

    • For clusters earlier than v1.23, kubernetes.io/ingress.class: "nginx" must be added to the annotation of the ingress created through the API.
• Dedicated load balancers must be of the network type (TCP/UDP) and support private networks (have a private IP address).
    • If the node where NGINX Ingress Controller runs and containers on this node cannot access Nginx ingress, you need to configure anti-affinity for the workload pods and Nginx Ingress Controller. For details, see Configuring Anti-affinity Between a Workload and Nginx Ingress Controller.
    • During the NGINX Ingress Controller pod upgrade, 10s are reserved for deleting the NGINX Ingress Controller at the ELB backend.
    • The timeout duration for the graceful exit of the NGINX Ingress Controller is 300s. If the timeout duration is longer than 300s during the upgrade of the NGINX Ingress Controller, persistent connections will be disconnected, and connectivity will be interrupted for a short period of time.
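For reference, a minimal sketch of an ingress handled by this controller. The host, backend Service, and class value are illustrative; the class must match the Ingress Class configured when the add-on was installed (nginx in this sketch), and clusters earlier than v1.23 declare it through the annotation instead.

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: example-ingress                    # illustrative ingress name
  # For clusters earlier than v1.23, declare the class with an annotation instead:
  # annotations:
  #   kubernetes.io/ingress.class: "nginx"
spec:
  ingressClassName: nginx                  # ingress class of the installed controller
  rules:
  - host: example.com                      # illustrative domain name
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: example-service          # illustrative backend Service
            port:
              number: 80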
    +
    +

    Prerequisites

Before installing this add-on, ensure that there is an available cluster with at least one node running properly. If no cluster is available, create one according to Creating a CCE Standard/Turbo Cluster.

    +
    +

    Installing the Add-on

    1. Log in to the CCE console and click the cluster name to access the cluster console. Choose Add-ons in the navigation pane, locate NGINX Ingress Controller on the right, and click Install.
    2. On the Install Add-on page, configure the specifications.

      +

      + + + + + + + + + + + + + +
      Table 1 Add-on configuration

      Parameter

      +

      Description

      +

      Add-on Specifications

      +

      Nginx Ingress can be deployed based on customized resource specifications.

      +

      Pods

      +

      You can adjust the number of add-on instances as required.

      +

      High availability is not possible with a single pod. If an error occurs on the node where the add-on instance runs, the add-on will fail.

      +

      Containers

      +

      You can adjust the container specifications of an add-on instance as required.

      +
      +
      +

    3. Configure the add-on parameters.

      • Ingress Class: Enter a custom controller name, which uniquely identifies an Ingress controller. The name of each controller in the same cluster must be unique and cannot be set to cce. (cce is the unique identifier of the ELB Ingress Controller.) When creating an Ingress, you can specify the controller name to declare which controller should manage this Ingress.
      • Namespace: Select a namespace where the ingress controller is in.
• Load Balancer: Select a shared or dedicated load balancer. If no load balancer is available, create one. The load balancer must have at least two available listener quotas, and ports 80 and 443 must not be occupied by existing listeners.
      • Admission Check: Admission control is performed on Ingresses to ensure that the controller can generate valid configurations. Admission verification is performed on the configuration of Nginx Ingresses. If the verification fails, the request will be intercepted. For details about admission verification, see Access Control.
        • Admission check slows down the responses to Ingress requests.
        • Only add-ons of version 2.4.1 or later support admission verification.
        +
        +
      • Nginx Parameters: Configuring the nginx.conf file will affect all managed ingresses. You can search for related parameters through ConfigMaps. If the parameters you configured are not included in the options listed in the ConfigMaps, the parameters will not take effect.

For example, setting the keep-alive-requests parameter to 100 limits the number of requests that can be served over a single keep-alive connection to 100.

        +
        {
        +    "keep-alive-requests": "100"
        +}
        +
      • Enabling Indicator Collection: If the add-on version is 2.4.12 or later, Prometheus monitoring metrics can be collected.
• Default server certificate: Select an IngressTLS or kubernetes.io/tls secret to configure the default certificate used when Nginx Ingress Controller starts. If no secret is available, click Create TLS Secret. For details, see Creating a Secret. For details about the default server certificate, see Default SSL Certificate.
      • 404 Service: By default, the 404 service provided by the add-on is used. To customize the 404 service, enter the namespace/service name. If the service does not exist, the add-on installation will fail.
      • Adding a TCP/UDP Service: By default, Nginx Ingress Controller can forward only external HTTP and HTTPS traffic. You can add TCP/UDP port mapping to forward external TCP/UDP traffic to services in the cluster. For more information about adding TCP/UDP services, see Exposing TCP and UDP services.
        • Protocol: Select TCP or UDP.
        • Service Port: specifies the port used by the ELB listener. The port number ranges from 1 to 65535.
        • Target Service Namespace: Select the namespace where the Service is in.
        • Destination Service: Select an existing Service. Any services that do not match the search criteria will be filtered out automatically.
        • Destination Service Port: Select the access port of the destination Service.
        +
        • If the cluster version is v1.19.16-r5, v1.21.8-r0, v1.23.6-r0, or later, the TCP/UDP hybrid protocols can be configured.
        • If the cluster version is v1.19.16-r5, v1.21.8-r0, v1.23.6-r0, v1.25.2-r0, or later, you can configure the TCP/UDP hybrid protocols to use the same external port.
        +
        +
      +

    4. Configure scheduling policies for the add-on.

      • Scheduling policies do not take effect on add-on instances of the DaemonSet type.
      • When configuring multi-AZ deployment or node affinity, ensure that there are nodes meeting the scheduling policy and that resources are sufficient in the cluster. Otherwise, the add-on cannot run.
      +
      + +
      + + + + + + + + + + + + + +
      Table 2 Configurations for add-on scheduling

      Parameter

      +

      Description

      +

      Multi AZ

      +
      • Preferred: Deployment pods of the add-on will be preferentially scheduled to nodes in different AZs. If all the nodes in the cluster are deployed in the same AZ, the pods will be scheduled to that AZ.
      • Equivalent mode: Deployment pods of the add-on are evenly scheduled to the nodes in the cluster in each AZ. If a new AZ is added, you are advised to increase add-on pods for cross-AZ HA deployment. With the Equivalent multi-AZ deployment, the difference between the number of add-on pods in different AZs will be less than or equal to 1. If resources in one of the AZs are insufficient, pods cannot be scheduled to that AZ.
      • Required: Deployment pods of the add-on will be forcibly scheduled to nodes in different AZs. If there are fewer AZs than pods, the extra pods will fail to run.
      +

      Node Affinity

      +
      • Not configured: Node affinity is disabled for the add-on.
      • Node Affinity: Specify the nodes where the add-on is deployed. If you do not specify the nodes, the add-on will be randomly scheduled based on the default cluster scheduling policy.
      • Specified Node Pool Scheduling: Specify the node pool where the add-on is deployed. If you do not specify the node pool, the add-on will be randomly scheduled based on the default cluster scheduling policy.
      • Custom Policies: Enter the labels of the nodes where the add-on is to be deployed for more flexible scheduling policies. If you do not specify node labels, the add-on will be randomly scheduled based on the default cluster scheduling policy.

        If multiple custom affinity policies are configured, ensure that there are nodes that meet all the affinity policies in the cluster. Otherwise, the add-on cannot run.

        +
      +

      Toleration

      +

Taints and tolerations allow (but do not force) the add-on Deployment to be scheduled to nodes with matching taints, and they control the eviction policy applied to the Deployment after the node it runs on is tainted.

      +

      The add-on adds the default tolerance policy for the node.kubernetes.io/not-ready and node.kubernetes.io/unreachable taints, respectively. The tolerance time window is 60s.

      +

      For details, see Configuring Tolerance Policies.

      +
      +
      +

    5. Click Install.
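The Adding a TCP/UDP Service option in step 3 corresponds to the port mappings described in the community document Exposing TCP and UDP services, where each mapping is an entry in a ConfigMap read by the controller through its --tcp-services-configmap (or --udp-services-configmap) flag. The console generates the equivalent configuration for you; the sketch below only illustrates the community format, and the ConfigMap name, namespace, and Service are assumptions.

apiVersion: v1
kind: ConfigMap
metadata:
  name: tcp-services               # illustrative name of the ConfigMap the controller watches
  namespace: kube-system           # illustrative; use the namespace the controller actually runs in
data:
  "9000": "default/example-service:8080"   # external port 9000 -> port 8080 of example-service in default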
    +
    +

    Installing Multiple Nginx Ingress Controllers

    1. Log in to the CCE console and click the cluster name to access the cluster console. In the navigation pane, choose Add-ons, locate the installed Nginx Ingress Controller, and click New.
    2. On the page displayed, reconfigure the add-on parameters. For details, see Installing the Add-on.
    3. Click Install.
    4. Wait until the installation instruction is delivered. Go back to Add-ons, click Manage, and view the installed add-on instance on the add-on details page.
    +
    +

    Components

    +
    + + + + + + + + + + + + + +
    Table 3 Add-on components

    Component

    +

    Description

    +

    Resource Type

    +

    cceaddon-nginx-ingress-<Controller name>-controller

    +

    (The controller name in versions earlier than 2.5.4 is cceaddon-nginx-ingress-controller.)

    +

    Nginx Ingress controller, which provides flexible routing and forwarding for clusters

    +

    Deployment

    +

    cceaddon-nginx-ingress-<Controller name>-backend

    +

    (The controller name in versions earlier than 2.5.4 is cceaddon-nginx-ingress-default-backend.)

    +

    Default backend of Nginx Ingress. The message "default backend - 404" is returned.

    +

    Deployment

    +
    +
    +
    +

    Configuring Anti-affinity Between a Workload and Nginx Ingress Controller

If the node running NGINX Ingress Controller, or the containers on that node, cannot access the Nginx ingress, set up anti-affinity between the workload and Nginx Ingress Controller so that the workload pods cannot be scheduled to the same node as the Nginx Ingress Controller.

    +
    apiVersion: apps/v1
    +kind: Deployment
    +metadata:
    +  name: nginx
    +spec:
    +  replicas: 1
    +  selector:
    +    matchLabels:
    +      app: nginx
    +  strategy:
    +    type: RollingUpdate
    +  template:
    +    metadata:
    +      labels:
    +        app: nginx
    +    spec:
    +      containers:
+      - image: nginx:alpine
    +        imagePullPolicy: IfNotPresent
    +        name: nginx
    +      imagePullSecrets:
    +      - name: default-secret
    +      affinity:
    +        podAntiAffinity:
    +          requiredDuringSchedulingIgnoredDuringExecution:
    +            - labelSelector:
    +                matchExpressions:    # Implement anti-affinity through the label of the NGINX Ingress Controller pods.
    +                  - key: app
    +                    operator: In
    +                    values:
    +                      - nginx-ingress   #If multiple NGINX Ingress Controllers are installed in the cluster, the label value is nginx-ingress-<Controller name>.
    +                  - key: component
    +                    operator: In
    +                    values:
    +                      - controller
    +              namespaces:
    +                - kube-system
    +              topologyKey: kubernetes.io/hostname
    +
    +

    Change History

    +
    + + + + + + + + + + + +
    Table 4 Release history for NGINX Ingress Controller 2.6.x

    Add-on Version

    +

    Supported Cluster Version

    +

    New Feature

    +

    Community Version

    +

    2.6.5

    +

    v1.25

    +

    v1.27

    +

    v1.28

    +

    v1.29

    +

    Metric collection can be disabled in the startup command.

    +

    1.9.6

    +
    +
    +
    +
    +
    + +
    + diff --git a/docs/cce/umn/cce_10_0045.html b/docs/cce/umn/cce_10_0045.html index f1d3a200..9694dada 100644 --- a/docs/cce/umn/cce_10_0045.html +++ b/docs/cce/umn/cce_10_0045.html @@ -7,11 +7,11 @@ +After a ConfigMap is created, it can be used in three workload scenarios: environment variables, command line parameters, and data volumes. +After secrets are created, they can be mounted as data volumes or be exposed as environment variables to be used by a container in a pod. diff --git a/docs/cce/umn/cce_10_0046.html b/docs/cce/umn/cce_10_0046.html index a2010651..ec6e799b 100644 --- a/docs/cce/umn/cce_10_0046.html +++ b/docs/cce/umn/cce_10_0046.html @@ -5,7 +5,7 @@
    +

    Ingress Feature Comparison

    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Table 1 Comparison between ingress features

    Feature

    +

    ELB Ingress Controller

    +

    Nginx Ingress Controller

    +

    O&M

    +

    O&M-free

    +

    Self-installation, upgrade, and maintenance

    +

    Performance

    +

    One ingress supports only one load balancer.

    +

    Multiple ingresses support one load balancer.

    +

    Enterprise-grade load balancers are used to provide high performance and high availability. Service forwarding is not affected in upgrade and failure scenarios.

    +

    Performance varies depending on the resource configuration of pods.

    +

    Dynamic loading is supported.

    +
    • Processes must be reloaded for non-backend endpoint changes, which causes loss to persistent connections.
    • Lua supports hot updates of endpoint changes.
    • Processes must be reloaded for a Lua modification.
    +

    Component deployment

    +

    Deployed on the master node

    +

Deployed on worker nodes, which incurs O&M costs for running the Nginx component

    +

    Route redirection

    +

    Supported

    +

    Supported

    +

    SSL configuration

    +

    Supported

    +

    Supported

    +

    Using ingress as a proxy for backend services

    +

    Supported

    +

    Supported, which can be implemented through backend-protocol: HTTPS annotations.

    +
    +
    +

    The LoadBalancer ingress is essentially different from the open source Nginx Ingress. Therefore, their supported Service types are different. For details, see Services Supported by Ingresses.

    +

    LoadBalancer Ingress Controller is deployed on a master node. All policies and forwarding behaviors are configured on the ELB side. Load balancers outside the cluster can connect to nodes in the cluster only through the IP address of the VPC in non-passthrough networking scenarios. Therefore, LoadBalancer ingresses support only NodePort Services. However, in the passthrough networking scenario (CCE Turbo cluster + dedicated load balancer), ELB can directly forward traffic to pods in the cluster. In this case, the ingress can only interconnect with ClusterIP Services.

    +

    Nginx Ingress Controller runs in a cluster and is exposed as a Service through NodePort. Traffic is forwarded to other Services in the cluster through Nginx-ingress. The traffic forwarding behavior and forwarding object are in the cluster. Therefore, both ClusterIP and NodePort Services are supported.

    +

In conclusion, LoadBalancer ingresses use enterprise-grade load balancers to forward traffic and deliver high performance and stability. Nginx Ingress Controller is deployed on cluster nodes, which consumes cluster resources but provides better configurability.

    Working Rules of LoadBalancer Ingress Controller

    LoadBalancer Ingress Controller developed by CCE implements layer-7 network access for the internet and intranet (in the same VPC) based on ELB and distributes access traffic to the corresponding Services using different URLs.

    LoadBalancer Ingress Controller is deployed on the master node and bound to the load balancer in the VPC where the cluster resides. Different domain names, ports, and forwarding policies can be configured for the same load balancer (with the same IP address). Figure 2 shows the working rules of LoadBalancer Ingress Controller.

    @@ -14,47 +84,102 @@

    When you use a dedicated load balancer in a CCE Turbo cluster, pod IP addresses are allocated from the VPC and the load balancer can directly access the pods. When creating an ingress for external cluster access, you can use ELB to access a ClusterIP Service and use pods as the backend server of the ELB listener. In this way, external traffic can directly access the pods in the cluster without being forwarded by node ports.

    Figure 3 Working rules of passthrough networking for dedicated LoadBalancer ingresses in CCE Turbo clusters
    -

    Services Supported by Ingresses

    Table 1 lists the Services supported by LoadBalancer ingresses. -
    Table 1 Services supported by LoadBalancer ingresses

    Cluster Type

    +

    Working Rules of Nginx Ingress Controller

    Nginx Ingress uses ELB as the traffic ingress. The nginx-ingress add-on is deployed in a cluster to balance traffic and control access.

    +

    nginx-ingress uses the templates and images provided by the open-source community, and issues may occur during usage. CCE periodically synchronizes the community version to fix known vulnerabilities. Check whether your service requirements can be met.

    +

    You can visit the open source community for more information.

    +
    +

    Nginx Ingress Controller is deployed on worker nodes through pods, which will result in O&M costs and Nginx component running overheads. Figure 4 shows the working rules of Nginx Ingress Controller.

    +
    1. After you update ingress resources, Nginx Ingress Controller writes a forwarding rule defined in the ingress resources into the nginx.conf configuration file of Nginx.
    2. The built-in Nginx component reloads the updated configuration file to modify and update the Nginx forwarding rule.
    3. When traffic accesses a cluster, the traffic is first forwarded by the created load balancer to the Nginx component in the cluster. Then, the Nginx component forwards the traffic to each workload based on the forwarding rule.
    +
    Figure 4 Working rules of Nginx Ingress Controller
    +
    +

    Services Supported by Ingresses

    Table 2 lists the Services supported by LoadBalancer ingresses. +
    - - - - - - - - - - - - - - - - - + + +
    Table 2 Services supported by LoadBalancer ingresses

    Cluster Type

    ELB Type

    +

    ELB Type

    ClusterIP

    +

    ClusterIP

    NodePort

    +

    NodePort

    CCE standard cluster

    +

    CCE standard cluster

    Shared load balancer

    +

    Shared load balancer

    Not supported

    +

    Not supported

    Supported

    +

    Supported

    Dedicated load balancer

    +

    Dedicated load balancer

    Not supported (Failed to access the dedicated load balancers because no ENI is bound to the associated pod of the ClusterIP Service.)

    +

    Not supported (Failed to access the dedicated load balancers because no ENI is bound to the associated pod of the ClusterIP Service.)

    Supported

    +

    Supported

    CCE Turbo cluster

    +

    CCE Turbo cluster

    Shared load balancer

    +

    Shared load balancer

    Not supported

    +

    Not supported

    Supported

    +

    Supported

    Dedicated load balancer

    +

    Dedicated load balancer

    Supported

    +

    Supported

    Not supported (Failed to access the dedicated load balancers because an ENI has been bound to the associated pod of the NodePort Service.)

    +

    Not supported (Failed to access the dedicated load balancers because an ENI has been bound to the associated pod of the NodePort Service.)

    +
    +
    +
    +
    Table 3 lists the Services supported by Nginx Ingress. +
    + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/cce/umn/cce_10_0132.html b/docs/cce/umn/cce_10_0132.html index 998f9ae0..a1a56deb 100644 --- a/docs/cce/umn/cce_10_0132.html +++ b/docs/cce/umn/cce_10_0132.html @@ -156,7 +156,7 @@

    Typical scenario: Disk I/O suspension causes process suspension.

    @@ -168,7 +168,7 @@ @@ -408,7 +408,6 @@ diff --git a/docs/cce/umn/cce_10_0142.html b/docs/cce/umn/cce_10_0142.html index a9a52529..29fc755a 100644 --- a/docs/cce/umn/cce_10_0142.html +++ b/docs/cce/umn/cce_10_0142.html @@ -7,7 +7,7 @@

    Notes and Constraints

    • By default, a NodePort Service is accessed within a VPC. To use an EIP to access a NodePort Service through public networks, bind an EIP to a node in the cluster in advance.
    • After a Service is created, if the affinity setting is switched from the cluster level to the node level, the connection tracking table will not be cleared. You are advised not to modify the Service affinity setting after the Service is created. If you need to modify it, create a Service again.
    • In a CCE Turbo cluster, node-level affinity is supported only when the Service backend is connected to a HostNetwork pod.
    • In VPC network mode, when container A is published through a NodePort Service and the Service affinity is set to the node level (that is, externalTrafficPolicy is set to Local), container B deployed on the same node cannot access container A through the node IP address and the NodePort Service.
    • When a NodePort Service is created in a cluster of v1.21.7 or later, the port on the node cannot be viewed using netstat by default. If the cluster forwarding mode is iptables, run the iptables -t nat -L command to view the port. If the cluster forwarding mode is IPVS, run the ipvsadm -Ln command to view the port.

    Creating a NodePort Service

    1. Log in to the CCE console and click the cluster name to access the cluster console.
    2. In the navigation pane, choose Services & Ingresses. In the upper right corner, click Create Service.
    3. Configure intra-cluster access parameters.

      • Service Name: Specify a Service name, which can be the same as the workload name.
      • Service Type: Select NodePort.
      • Namespace: namespace that the workload belongs to.
      • Service Affinity: For details, see externalTrafficPolicy (Service Affinity).
        • Cluster level: The IP addresses and access ports of all nodes in a cluster can access the workload associated with the Service. Service access will cause performance loss due to route redirection, and the source IP address of the client cannot be obtained.
        • Node level: Only the IP address and access port of the node where the workload is located can access the workload associated with the Service. Service access will not cause performance loss due to route redirection, and the source IP address of the client can be obtained.
      • Selector: Add a label and click Confirm. The Service will use this label to select pods. You can also click Reference Workload Label to use the label of an existing workload. In the dialog box that is displayed, select a workload and click OK.
      • IPv6: This function is disabled by default. After this function is enabled, the cluster IP address of the Service changes to an IPv6 address. This parameter is available only in clusters of v1.15 or later with IPv6 enabled (set during cluster creation).
      • Ports
        • Protocol: protocol used by the Service.
        • Service Port: port used by the Service. The port number ranges from 1 to 65535.
        • Container Port: listener port of the workload. For example, Nginx uses port 80 by default.
        • Node Port: You are advised to select Auto. You can also specify a port. The default port ranges from 30000 to 32767.

    4. Click OK.
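    If you prefer to define the same Service in a manifest, a minimal NodePort sketch is shown below (the app: nginx selector label and all port numbers are placeholders; externalTrafficPolicy: Local corresponds to the node-level affinity described above):

    apiVersion: v1
    kind: Service
    metadata:
      name: nodeport-demo              # Service name; it can be the same as the workload name.
      namespace: default
    spec:
      type: NodePort
      externalTrafficPolicy: Local     # Node-level affinity; omit it or use Cluster for cluster-level affinity.
      selector:
        app: nginx                     # Label used to select the backend pods (placeholder).
      ports:
        - protocol: TCP
          port: 8080                   # Service port.
          targetPort: 80               # Container port of the workload.
          nodePort: 30080              # Optional; omit it to let Kubernetes allocate a port from 30000-32767.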
    diff --git a/docs/cce/umn/cce_10_0154.html b/docs/cce/umn/cce_10_0154.html index e811513c..c60229fc 100644 --- a/docs/cce/umn/cce_10_0154.html +++ b/docs/cce/umn/cce_10_0154.html @@ -199,7 +199,7 @@

    The interval after a failed scale-in indicates the period during which the cluster cannot be scaled in after the Autoscaler add-on triggers scale-in. This interval takes effect at the cluster level.

    Change History

    Table 3 Services supported by Nginx Ingress

    Cluster Type | ELB Type | ClusterIP | NodePort
    CCE standard cluster | Shared load balancer | Supported | Supported
    CCE standard cluster | Dedicated load balancer | Supported | Supported
    CCE Turbo cluster | Shared load balancer | Supported | Supported
    CCE Turbo cluster | Dedicated load balancer | Supported | Supported

    Warning event
    Listening object: /dev/kmsg
    Matching rule: "task \\S+:\\w+ blocked for more than \\w+ seconds\\."

    Warning event
    Listening object: /dev/kmsg
    Matching rule: Remounting filesystem read-only

    Default threshold: 10 abnormal processes detected for three consecutive times
    Source:
    • /proc/{PID}/stat
    • Alternatively, you can run the ps aux command.
    Exceptional scenario: The ProcessD check item ignores the resident D processes (heartbeat and update) on which the SDI driver on the BMS node depends.
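    As the source list above notes, the same information is visible with ps. A hedged one-liner for listing processes stuck in the D (uninterruptible sleep) state, which is what the process check looks for:

    # Print the PID, state, and command of processes whose STAT column (8th field of ps aux output) starts with D.
    ps aux | awk '$8 ~ /^D/ {print $2, $8, $11}'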

    Table 6 Release history for add-on adapted to clusters 1.29 (columns: Add-on Version, Supported Cluster Version)
    Table 7 Release history for add-on adapted to clusters 1.28 (columns: Add-on Version, Supported Cluster Version)
    Table 8 Release history for add-on adapted to clusters 1.27 (columns: Add-on Version, Supported Cluster Version)
    Table 9 Release history for add-on adapted to clusters 1.25 (columns: Add-on Version, Supported Cluster Version)
    Table 10 Release history for add-on adapted to clusters 1.23 (columns: Add-on Version, Supported Cluster Version)
    Table 11 Release history for add-on adapted to clusters 1.21 (columns: Add-on Version, Supported Cluster Version)
    Table 12 Release history for add-on adapted to clusters 1.19 (columns: Add-on Version, Supported Cluster Version)
    Table 13 Release history for add-on adapted to clusters 1.17 (columns: Add-on Version, Supported Cluster Version)
    • When accepting an ECS, you can reset the ECS OS to a standard public image offered by CCE. If you choose to do so, you need to reset the password or key pair, and the previous password or key pair will become invalid.
    • LVM information, including volume groups (VGs), logical volumes (LVs), and physical volumes (PVs), will be deleted from the system disks and data disks attached to the selected ECSs during acceptance. Ensure that the information has been backed up.
    • During the acceptance of an ECS, do not perform any operation on the ECS through the ECS console.

    Notes and Constraints

    • ECSs can be managed.

    Prerequisites

    The cloud servers to be managed must meet the following requirements:

    • The node to be accepted must be in the Running state and not used by other clusters. In addition, the node to be accepted does not carry the CCE-Dynamic-Provisioning-Node tag.
    • The node to be accepted and the cluster must be in the same VPC. (If the cluster version is earlier than v1.13.10, the node to be accepted and the CCE cluster must be in the same subnet.)
    • Data disks must be attached to the nodes to be managed. A local disk (disk-intensive disk) or a data disk of at least 20 GiB can be attached to the node, and any data disks already attached cannot be smaller than 10 GiB.
    • The node to be accepted has 2-core or higher CPU, 4 GiB or larger memory, and only one NIC.
    • Only cloud servers with the same data disk configurations can be added in batches.
    • If IPv6 is enabled for a cluster, only nodes in a subnet with IPv6 enabled can be accepted and managed. If IPv6 is not enabled for the cluster, only nodes in a subnet without IPv6 enabled can be accepted.
    • Nodes in a CCE Turbo cluster must support sub-ENIs or be bound to at least 16 ENIs. For details about the node flavors, see the node flavors that can be selected on the console when you create a node.
    • Data disks that have been partitioned will be ignored during node management. Ensure that at least one unpartitioned data disk that meets the specifications is attached to the node.
    diff --git a/docs/cce/umn/cce_10_0209.html b/docs/cce/umn/cce_10_0209.html index fcf554a9..64fab30a 100644 --- a/docs/cce/umn/cce_10_0209.html +++ b/docs/cce/umn/cce_10_0209.html @@ -71,8 +71,8 @@

    The interval after a node is deleted indicates the period during which the cluster cannot be scaled in after the Autoscaler add-on triggers a scale-in. This setting takes effect in the entire cluster.

    The interval after a failed scale-in indicates the period during which the cluster cannot be scaled in after the Autoscaler add-on triggers a scale-in. This setting takes effect in the entire cluster.

    Period for Autoscaler to Retry a Scale-out

    If a node pool failed to scale out, for example, due to insufficient quota or an error that occurred during node installation, Autoscaler can retry the scale-out in the node pool or switch to another node pool. The retry period varies depending on the failure cause:

    • When the user quota is insufficient, Autoscaler cools down the node pool for 5 minutes, 10 minutes, or 20 minutes. The maximum cooldown duration is 30 minutes. Then, Autoscaler switches to another node pool for a scale-out in the next 10 seconds until the expected node is added or all node pools are cooled down.
    • If an error occurred during node installation in a node pool, the node pool enters a 5-minute cooldown period. After the period expires, Autoscaler can trigger a node pool scale-out again. If the faulty node is automatically reclaimed, Cluster Autoscaler re-evaluates the cluster status within 1 minute and triggers a node pool scale-out as needed.
    • During a node pool scale-out, if a node remains in the installing state for a long time, Cluster Autoscaler tolerates the node for a maximum of 15 minutes. After the tolerance period expires, Cluster Autoscaler re-evaluates the cluster status and triggers a node pool scale-out as needed.

    Example YAML

    The following is a YAML example of a node scaling policy:

    apiVersion: autoscaling.cce.io/v1alpha1

    Table 1 Load balancer configurations

    How to Create: Use existing
    Configuration: Only the load balancers in the same VPC as the cluster can be selected. If no load balancer is available, click Create Load Balancer to create one on the ELB console.

    How to Create: Auto create
    Configuration:
    • Instance Name: Enter a load balancer name.
    • AZ: available only to dedicated load balancers. You can deploy a load balancer in multiple AZs to improve service availability.
    • Frontend Subnet: available only to dedicated load balancers. It is used to allocate IP addresses for load balancers to provide services externally.
    • Backend Subnet: available only to dedicated load balancers. It is used to allocate IP addresses for load balancers to access the backend service.
    • Network/Application-oriented Specifications (available only to dedicated load balancers)
      • Elastic: applies to fluctuating traffic, billed based on total traffic. Clusters of v1.21.10-r10, v1.23.8-r10, v1.25.3-r10, and later versions support elastic specifications.
      • Fixed: applies to stable traffic, billed based on specifications.
    • EIP: If you select Auto create, you can configure the billing mode and size of the public network bandwidth.
    • Resource Tag: You can add resource tags to classify resources. You can create predefined tags on the TMS console. Predefined tags are available to all resources that support tags and help improve tag creation and resource migration efficiency.

  • Backend Protocol:
    When the listener is HTTP-compliant, only HTTP can be selected.
    If it is an HTTPS listener, this parameter can be set to HTTP or HTTPS.
  • Advanced Options: the configurations listed in the following table are supported.
    • When the distribution policy uses the source IP hash, sticky sessions cannot be set.
    • Dedicated load balancers in the clusters of a version earlier than v1.21 do not support sticky sessions. If sticky sessions are required, use shared load balancers.
  • Health Check: Set the health check configuration of the load balancer. If this function is enabled, the parameters listed in the health check table further below are supported.

    Configuration: Idle Timeout
    Description: Timeout for an idle client connection. If there are no requests reaching the load balancer during the timeout duration, the load balancer will disconnect the connection from the client and establish a new connection when there is a new request.
    Restrictions: None

    Configuration: Request Timeout
    Description: Timeout for waiting for a request from a client. There are two cases:
    • If the client fails to send a request header to the load balancer during the timeout duration, the request will be interrupted.
    • If the interval between two consecutive request bodies reaching the load balancer is greater than the timeout duration, the connection will be disconnected.
    Restrictions: None

    Configuration: Response Timeout
    Description: Timeout for waiting for a response from a backend server. After a request is forwarded to the backend server, if the backend server does not respond during the timeout duration, the load balancer will stop waiting and return HTTP 504 Gateway Timeout.
    Restrictions: None

    Configuration: HTTP2
    Description: Whether to use HTTP/2 for a client to communicate with a load balancer. Request forwarding using HTTP/2 improves the access performance between your application and the load balancer. However, the load balancer still uses HTTP/1.x to forward requests to the backend server.
    Restrictions: This function is available only when the listener is HTTPS-compliant.

    Parameter: Protocol
    Description: When the protocol of the target Service port is TCP, more protocols including HTTP are supported.
    • Check Path (supported only by HTTP for health check): specifies the health check URL. The check path must start with a slash (/) and contain 1 to 80 characters.

    Parameter: Port
    Description: By default, the service port (NodePort or container port of the Service) is used for health check. You can also specify another port for health check. After the port is specified, a service port named cce-healthz will be added for the Service.
    • Node Port: If a shared load balancer is used or no ENI instance is associated, the node port is used as the health check port. If this parameter is not specified, a random port is used. The value ranges from 30000 to 32767.
    • Container Port: When a dedicated load balancer is associated with an ENI instance, the container port is used for health check. The value ranges from 1 to 65535.

    Parameter: Check Period (s)
    Description: Specifies the maximum interval between health checks. The value ranges from 1 to 50.

    Parameter: Timeout (s)
    Description: Specifies the maximum timeout duration for each health check. The value ranges from 1 to 50.

    Parameter: Max. Retries
    Description: Specifies the maximum number of health check retries. The value ranges from 1 to 10.

    CoreDNS is a DNS server that provides domain name resolution for Kubernetes clusters through a chain add-on.

    NGINX Ingress Controller

    This add-on forwards application data for Services that can be directly accessed from outside a cluster, and provides capabilities such as virtual hosts, load balancing, SSL proxy, and HTTP routing.

    Deprecated APIs

    With the evolution of Kubernetes APIs, APIs are periodically reorganized or upgraded, and certain APIs are deprecated and finally deleted. The following tables list the deprecated APIs in each Kubernetes community version. For details about more deprecated APIs, see Deprecated API Migration Guide.


    When an API is deprecated, the existing resources are not affected. However, when you create or edit the resources, the API version will be intercepted.
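    Before creating or editing resources on an upgraded cluster, you can check which API versions the cluster still serves. A small sketch using standard kubectl commands (the grep filters are only examples):

    # List every group/version served by the API server.
    kubectl api-versions
    # Check whether a deprecated group/version, for example policy/v1beta1, is still served.
    kubectl api-versions | grep policy
    # Confirm which group/version a resource type is currently served from.
    kubectl api-resources | grep -i poddisruptionbudget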

    Table 2 APIs deprecated in Kubernetes v1.29

    Resource: FlowSchema and PriorityLevelConfiguration
    Deprecated API Version: flowcontrol.apiserver.k8s.io/v1beta2
    Substitute API Version: flowcontrol.apiserver.k8s.io/v1 (This API has been available since v1.29.) and flowcontrol.apiserver.k8s.io/v1beta3 (This API has been available since v1.26.)
    Change Description:
    • Significant changes in flowcontrol.apiserver.k8s.io/v1: spec.limited.assuredConcurrencyShares of PriorityLevelConfiguration has been renamed spec.limited.nominalConcurrencyShares. The default value is 30 only when it is not specified, and the explicit value 0 does not change to 30.
    • Key changes in flowcontrol.apiserver.k8s.io/v1beta3: spec.limited.assuredConcurrencyShares of PriorityLevelConfiguration has been renamed spec.limited.nominalConcurrencyShares.
    Table 3 APIs deprecated in Kubernetes v1.27

    Resource Name: CSIStorageCapacity
    Deprecated API Version: storage.k8s.io/v1beta1
    Substitute API Version: storage.k8s.io/v1 (This API is available since v1.24.)
    Change Description: None

    Resource Name: FlowSchema and PriorityLevelConfiguration
    Deprecated API Version: flowcontrol.apiserver.k8s.io/v1beta1
    Substitute API Version: flowcontrol.apiserver.k8s.io/v1beta3 (This API is available since v1.26.)
    Change Description: None

    Resource Name: HorizontalPodAutoscaler
    Deprecated API Version: autoscaling/v2beta2
    Substitute API Version: autoscaling/v2 (This API is available since v1.23.)
    Change Description: None
    Table 4 APIs deprecated in Kubernetes v1.25

    Resource Name: CronJob
    Deprecated API Version: batch/v1beta1
    Substitute API Version: batch/v1 (This API is available since v1.21.)
    Change Description: None

    Resource Name: EndpointSlice
    Deprecated API Version: discovery.k8s.io/v1beta1
    Substitute API Version: discovery.k8s.io/v1 (This API is available since v1.21.)
    Change Description: Pay attention to the following changes:
    • In each endpoint, the topology["kubernetes.io/hostname"] field has been deprecated. Replace it with the nodeName field.
    • In each endpoint, the topology["kubernetes.io/zone"] field has been deprecated. Replace it with the zone field.
    • The topology field is replaced with deprecatedTopology and cannot be written in v1.

    Resource Name: Event
    Deprecated API Version: events.k8s.io/v1beta1
    Substitute API Version: events.k8s.io/v1 (This API is available since v1.19.)
    Change Description: Pay attention to the following changes:
    • The type field can only be set to Normal or Warning.
    • The involvedObject field is renamed regarding.
    • The action, reason, reportingController, and reportingInstance fields are mandatory for creating a new events.k8s.io/v1 event.
    • Use eventTime instead of the deprecated firstTimestamp field (this field has been renamed deprecatedFirstTimestamp and is not allowed to appear in the new events.k8s.io/v1 event object).
    • Use series.lastObservedTime instead of the deprecated lastTimestamp field (this field has been renamed deprecatedLastTimestamp and is not allowed to appear in the new events.k8s.io/v1 event object).
    • Use series.count instead of the deprecated count field (this field has been renamed deprecatedCount and is not allowed to appear in the new events.k8s.io/v1 event object).
    • Use reportingController instead of the deprecated source.component field (this field has been renamed deprecatedSource.component and is not allowed to appear in the new events.k8s.io/v1 event object).
    • Use reportingInstance instead of the deprecated source.host field (this field has been renamed deprecatedSource.host and is not allowed to appear in the new events.k8s.io/v1 event object).

    Resource Name: HorizontalPodAutoscaler
    Deprecated API Version: autoscaling/v2beta1
    Substitute API Version: autoscaling/v2 (This API is available since v1.23.)
    Change Description: None

    Resource Name: PodDisruptionBudget
    Deprecated API Version: policy/v1beta1
    Substitute API Version: policy/v1 (This API is available since v1.21.)
    Change Description: If spec.selector is set to null ({}) in PodDisruptionBudget of policy/v1, all pods in the namespace are selected. (In policy/v1beta1, an empty spec.selector means that no pod will be selected.) If spec.selector is not specified, pods will be selected in neither API version.

    Resource Name: PodSecurityPolicy
    Deprecated API Version: policy/v1beta1
    Substitute API Version: None
    Change Description: Since v1.25, the PodSecurityPolicy resource no longer provides APIs of the policy/v1beta1 version, and the PodSecurityPolicy access controller is deleted. Use Pod Security Admission instead.

    Resource Name: RuntimeClass
    Deprecated API Version: node.k8s.io/v1beta1
    Substitute API Version: node.k8s.io/v1 (This API is available since v1.20.)
    Change Description: None
    Table 5 APIs deprecated in Kubernetes v1.22

    Resource Name: MutatingWebhookConfiguration, ValidatingWebhookConfiguration
    Deprecated API Version: admissionregistration.k8s.io/v1beta1
    Substitute API Version: admissionregistration.k8s.io/v1 (This API is available since v1.16.)
    Change Description:
    • The default value of webhooks[*].failurePolicy is changed from Ignore to Fail in v1.
    • The default value of webhooks[*].matchPolicy is changed from Exact to Equivalent in v1.
    • The default value of webhooks[*].timeoutSeconds is changed from 30s to 10s in v1.
    • The default value of webhooks[*].sideEffects is deleted, and this field must be specified. In v1, the value can only be None or NoneOnDryRun.
    • The default value of webhooks[*].admissionReviewVersions is deleted. In v1, this field must be specified. (AdmissionReview v1 and v1beta1 are supported.)
    • webhooks[*].name must be unique in the list of objects created through admissionregistration.k8s.io/v1.

    Resource Name: CustomResourceDefinition
    Deprecated API Version: apiextensions.k8s.io/v1beta1
    Substitute API Version: apiextensions/v1 (This API is available since v1.16.)
    Change Description:
    • The default value of spec.scope is no longer Namespaced. This field must be explicitly specified.
    • spec.version is deleted from v1. Use spec.versions instead.
    • spec.validation is deleted from v1. Use spec.versions[*].schema instead.
    • spec.subresources is deleted from v1. Use spec.versions[*].subresources instead.
    • spec.additionalPrinterColumns is deleted from v1. Use spec.versions[*].additionalPrinterColumns instead.
    • spec.conversion.webhookClientConfig is moved to spec.conversion.webhook.clientConfig in v1.
    • spec.conversion.conversionReviewVersions is moved to spec.conversion.webhook.conversionReviewVersions in v1.
    • spec.versions[*].schema.openAPIV3Schema becomes a mandatory field when the CustomResourceDefinition object of the v1 version is created, and its value must be a structural schema.
    • spec.preserveUnknownFields: true cannot be specified when the CustomResourceDefinition object of the v1 version is created. This configuration must be specified using x-kubernetes-preserve-unknown-fields: true in the schema definition.
    • In v1, the JSONPath field in the additionalPrinterColumns entry is renamed jsonPath (patch #66531).

    Resource Name: APIService
    Deprecated API Version: apiregistration/v1beta1
    Substitute API Version: apiregistration.k8s.io/v1 (This API is available since v1.10.)
    Change Description: None

    Resource Name: TokenReview
    Deprecated API Version: authentication.k8s.io/v1beta1
    Substitute API Version: authentication.k8s.io/v1 (This API is available since v1.6.)
    Change Description: None

    Resource Name: LocalSubjectAccessReview, SelfSubjectAccessReview, SubjectAccessReview, SelfSubjectRulesReview
    Deprecated API Version: authorization.k8s.io/v1beta1
    Substitute API Version: authorization.k8s.io/v1 (This API is available since v1.16.)
    Change Description: spec.group was renamed spec.groups in v1 (patch #32709).

    Resource Name: CertificateSigningRequest
    Deprecated API Version: certificates.k8s.io/v1beta1
    Substitute API Version: certificates.k8s.io/v1 (This API is available since v1.19.)
    Change Description: Pay attention to the following changes in certificates.k8s.io/v1:
    • For an API client that requests a certificate:
      • spec.signerName becomes a mandatory field (see Known Kubernetes Signers). In addition, the certificates.k8s.io/v1 API cannot be used to create requests whose signer is kubernetes.io/legacy-unknown.
      • spec.usages now becomes a mandatory field, which cannot contain duplicate string values and can contain only known usage strings.
    • For an API client that needs to approve or sign a certificate:
      • status.conditions cannot contain duplicate types.
      • The status.conditions[*].status field is now mandatory.
      • The status.certificate must be PEM-encoded and can contain only the CERTIFICATE data block.

    Resource Name: Lease
    Deprecated API Version: coordination.k8s.io/v1beta1
    Substitute API Version: coordination.k8s.io/v1 (This API is available since v1.14.)
    Change Description: None

    Resource Name: Ingress
    Deprecated API Version: networking.k8s.io/v1beta1, extensions/v1beta1
    Substitute API Version: networking.k8s.io/v1 (This API is available since v1.19.)
    Change Description:
    • The spec.backend field is renamed spec.defaultBackend.
    • The serviceName field of the backend is renamed service.name.
    • The backend servicePort field represented by a number is renamed service.port.number.
    • The backend servicePort field represented by a string is renamed service.port.name.
    • The pathType field is mandatory for all paths to be specified. The options are Prefix, Exact, and ImplementationSpecific. To match the behavior of not defining the path type in v1beta1, use ImplementationSpecific.

    Resource Name: IngressClass
    Deprecated API Version: networking.k8s.io/v1beta1
    Substitute API Version: networking.k8s.io/v1 (This API is available since v1.19.)
    Change Description: None

    Resource Name: ClusterRole, ClusterRoleBinding, Role, RoleBinding
    Deprecated API Version: rbac.authorization.k8s.io/v1beta1
    Substitute API Version: rbac.authorization.k8s.io/v1 (This API is available since v1.8.)
    Change Description: None

    Resource Name: PriorityClass
    Deprecated API Version: scheduling.k8s.io/v1beta1
    Substitute API Version: scheduling.k8s.io/v1 (This API is available since v1.14.)
    Change Description: None

    Resource Name: CSIDriver, CSINode, StorageClass, VolumeAttachment
    Deprecated API Version: storage.k8s.io/v1beta1
    Substitute API Version: storage.k8s.io/v1
    Change Description:
    • CSIDriver is available in storage.k8s.io/v1 since v1.19.
    • CSINode is available in storage.k8s.io/v1 since v1.17.
    • StorageClass is available in storage.k8s.io/v1 since v1.6.
    • VolumeAttachment is available in storage.k8s.io/v1 since v1.13.

    Creating Nginx Ingresses on the Console


    Prerequisites

    • An ingress provides network access for backend workloads. Ensure that a workload is available in a cluster. If no workload is available, deploy a workload by referring to Creating a Deployment, Creating a StatefulSet, or Creating a DaemonSet.
    • A ClusterIP or NodePort Service has been configured for the workload. For details about how to configure the Service, see ClusterIP or NodePort.
    • To add Nginx Ingress, ensure that the Nginx Ingress Controller add-on has been installed in the cluster. For details, see Installing the Add-on.

    Notes and Constraints

    • You are advised not to modify any configuration of a load balancer on the ELB console. Otherwise, the Service will be abnormal. If you have modified the configuration, uninstall the nginx-ingress add-on and reinstall it.
    • The URL registered in an ingress forwarding policy must be the same as the URL used to access the backend Service. Otherwise, a 404 error will be returned.
    • The selected or created load balancer must be in the same VPC as the current cluster, and it must match the load balancer type (private or public network).
    • The load balancer must have at least two listeners available, and ports 80 and 443 must not be occupied by existing listeners.

    Creating an Nginx Ingress

    This section uses an Nginx workload as an example to describe how to create an Nginx ingress.

    1. Log in to the CCE console and click the cluster name to access the cluster console.
    2. Choose Services & Ingresses in the navigation pane, click the Ingresses tab, and click Create Ingress in the upper right corner.
    3. Configure ingress parameters.

      • Name: Customize the name of an ingress, for example, nginx-ingress-demo.
      • Namespace: Select the namespace to which the ingress is to be added.
      • nginx-ingress: This option is displayed only after the NGINX Ingress Controller add-on is installed in the cluster.
        • Ingress Class: Select the name of the Nginx Ingress controller installed in the cluster. You can install multiple Nginx Ingress controllers and customize controller names as needed.
        • External Protocol: The options are HTTP and HTTPS. The default number of the listening port reserved when Nginx Ingress Controller is installed is 80 for HTTP and 443 for HTTPS. To use HTTPS, configure a certificate.
        • Certificate Source: source of a certificate for encrypting and authenticating HTTPS data transmission.
          • If you select a TLS key, you must create a key certificate of the IngressTLS or kubernetes.io/tls type beforehand. For details, see Creating a Secret.
          • If you select the default certificate, Nginx Ingress Controller will use its default certificate for encryption and authentication. You can configure the default certificate during NGINX Ingress Controller installation. If the default certificate is not configured, the certificate provided by Nginx Ingress Controller will be used.
        • SNI: stands for Server Name Indication (SNI), which is an extended protocol of TLS. SNI allows multiple TLS-compliant domain names for external access using the same IP address and port number, and different domain names can use different security certificates. After SNI is enabled, the client is allowed to submit the requested domain name when initiating a TLS handshake request. After receiving the TLS request, the load balancer searches for the certificate based on the domain name in the request. If the certificate corresponding to the domain name is found, the load balancer returns the certificate for authorization. Otherwise, the default certificate (server certificate) is returned for authorization.
      • Forwarding Policy: When the access address of a request matches the forwarding policy (a forwarding policy consists of a domain name and URL), the request is forwarded to the corresponding target Service for processing. Click Add Forwarding Policies to add multiple forwarding policies.
        • Domain Name: actual domain name. Ensure that the entered domain name has been registered and archived. After the ingress is created, bind the domain name to the IP address of the automatically created load balancer (IP address of the ingress access address). If a domain name rule is configured, the domain name must always be used for access.
        • Path Matching Rule:
          • Default: Prefix match is used by default.
          • Prefix match: If the URL is set to /healthz, the URL that meets the prefix can be accessed, for example, /healthz/v1 and /healthz/v2.
          • Exact match: The URL can be accessed only when it is fully matched. For example, if the URL is set to /healthz, only /healthz can be accessed.
        • Path: access path to be registered, for example, /healthz.
          • The access path matching rule of Nginx Ingress is based on the path prefix separated by the slash (/) and is case-sensitive. If the subpath separated by a slash (/) matches the prefix, the access is normal. However, if the prefix is only a part of the character string in the subpath, the access is not matched. For example, if the URL is set to /healthz, /healthz/v1 is matched, but /healthzv1 is not matched.
          • The access path added here must exist in the backend application. Otherwise, the forwarding fails.

            For example, the default access URL of the Nginx application is /usr/share/nginx/html. When adding /test to the ingress forwarding policy, ensure the access URL of your Nginx application contains /usr/share/nginx/html/test. Otherwise, error 404 will be returned.

        • Destination Service: Select an existing Service or create a Service. Services that do not meet search criteria are automatically filtered out.
        • Destination Service Port: Select the access port of the destination Service.
        • Operation: Click Delete to delete the configuration.
      • Annotation: The value is in the format of key:value. You can use annotations to query the configurations supported by nginx-ingress.

    4. Click OK.

      After the ingress is created, it is displayed in the ingress list.
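      To double-check the result from the command line, a quick verification sketch (nginx-ingress-demo and the /healthz path follow the examples used above; replace them with your own values):

      kubectl get ingress nginx-ingress-demo
      # Once an address is shown, test the registered forwarding path.
      curl http://<ingress_address>/healthz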


    Patch Version Release Notes

    Version 1.29


    CPU Policy


    Scenarios

    By default, kubelet uses CFS quotas to enforce pod CPU limits. When a node runs many CPU-bound pods, the workload can move to different CPU cores depending on whether the pod is throttled and which CPU cores are available at scheduling time. Many workloads are not sensitive to this migration and thus work fine without any intervention. Some applications are CPU-sensitive. They are sensitive to:

    • CPU throttling
    • Context switching
    • Processor cache misses
    • Cross-socket memory access
    • Hyperthreads that are expected to run on the same physical CPU core

    If your workloads are sensitive to any of these items and CPU cache affinity and scheduling latency significantly affect workload performance, kubelet allows alternative CPU management policies (CPU binding) to determine some placement preferences on the node. The CPU manager preferentially allocates resources on a socket and full physical cores to avoid interference.
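    In upstream Kubernetes, the CPU manager grants exclusive cores only to containers in Guaranteed pods that request whole CPUs, so workloads that rely on CPU binding are typically written like the following sketch (the image and resource values are placeholders):

    apiVersion: v1
    kind: Pod
    metadata:
      name: cpu-bound-demo
    spec:
      containers:
        - name: app
          image: nginx:latest        # Placeholder image.
          resources:
            requests:
              cpu: "2"               # Integer CPU request equal to the limit...
              memory: 2Gi
            limits:
              cpu: "2"               # ...so the pod is in the Guaranteed QoS class and can get dedicated cores.
              memory: 2Gi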


    Advanced Settings

    Expand the area and configure the following parameters:

    • Data Disk Space Allocation: allocates space for container engines, images, and ephemeral storage for them to run properly. For details about how to allocate data disk space, see Data Disk Space Allocation.
    • Data Disk Encryption: Data disk encryption safeguards your data. Snapshots generated from encrypted disks and disks created using these snapshots automatically inherit the encryption setting.
      • Not encrypted is selected by default.
      • If you select Enabled (key) for Data Disk Encryption, choose an existing key. If no key is available, click View Key List and create a key. After the key is created, click the refresh icon next to the text box.
      • If you select Enabled (KMS key ID) for Data Disk Encryption, enter a KMS key (which can be shared by others) in the current region.

      Adding data disks

      A maximum of 16 data disks can be attached to an ECS. By default, a raw disk is created without any processing. You can also click Expand and select any of the following options:

      • Default: By default, a raw disk is created without any processing.
      • Mount Disk: The data disk is attached to a specified directory.
      • Use as PV: applicable when there is a high performance requirement on PVs. The node.kubernetes.io/local-storage-persistent label is added to the node with PV configured. The value is linear or striped.
      • Use as ephemeral volume: applicable when there is a high performance requirement on emptyDir.
      NOTE:
      • Local PVs are supported only when the cluster version is v1.21.2-r0 or later and the Everest add-on version is 2.1.23 or later. Version 2.1.23 or later is recommended.
      • Local EVs are supported only when the cluster version is v1.21.2-r0 or later and the Everest add-on version is 1.2.29 or later.

      Using kubectl to Create an Nginx Ingress


      Scenario

      This section uses an Nginx workload as an example to describe how to create an Nginx ingress using kubectl.

      Prerequisites

      Ingress Description of networking.k8s.io/v1

      In CCE clusters of v1.23 or later, the ingress version is switched to networking.k8s.io/v1.


      Compared with v1beta1, v1 has the following differences in parameters:

      • The ingress type is changed from kubernetes.io/ingress.class in annotations to spec.ingressClassName.
      • The format of backend is changed.
      • The pathType parameter must be specified for each path. The options are as follows:
        • ImplementationSpecific: The matching method depends on Ingress Controller. The matching method defined by ingress.beta.kubernetes.io/url-match-mode is used in CCE, which is the same as v1beta1.
        • Exact: exact matching of the URL, which is case-sensitive.
        • Prefix: matching based on the URL prefix separated by a slash (/). The match is case-sensitive, and elements in the path are matched one by one. A path element refers to a list of labels in the path separated by a slash (/).

      Creating an Nginx Ingress

      1. Use kubectl to access the cluster. For details, see Connecting to a Cluster Using kubectl.
      2. Create a YAML file named ingress-test.yaml. The file name can be customized.

        vi ingress-test.yaml


        Starting from cluster v1.23, the ingress version is switched from networking.k8s.io/v1beta1 to networking.k8s.io/v1. For details about the differences between v1 and v1beta1, see Ingress Description of networking.k8s.io/v1.


        The following uses HTTP as an example to describe how to configure the YAML file:

        For clusters of v1.23 or later:
        apiVersion: networking.k8s.io/v1
        kind: Ingress
        metadata:
          name: ingress-test
        spec:
          rules:
            - host: ''
              http:
                paths:
                  - path: /
                    backend:
                      service:
                        name: <your_service_name>  # Replace it with the name of your target Service.
                        port:
                          number: <your_service_port>  # Replace it with the port number of your target Service.
                    property:
                      ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
                    pathType: ImplementationSpecific
          ingressClassName: nginx   # Nginx Ingress is used. If multiple Nginx Ingress controllers are installed in the cluster, replace nginx with the custom name of the controller associated with the ingress.
        For clusters of v1.21 or earlier:
        apiVersion: networking.k8s.io/v1beta1
        kind: Ingress
        metadata:
          name: ingress-test
          namespace: default
          annotations:
            kubernetes.io/ingress.class: nginx   # Nginx Ingress is used.
        spec:
          rules:
            - host: ''
              http:
                paths:
                  - path: '/'
                    backend:
                      serviceName: <your_service_name>  # Replace it with the name of your target Service.
                      servicePort: <your_service_port>  # Replace it with the port number of your target Service.
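        After saving the file, you can apply it and confirm the result; a minimal sketch (the file and ingress names follow the example above):

        kubectl create -f ingress-test.yaml
        # Verify that the ingress has been created and note the allocated address.
        kubectl get ingress ingress-test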
    Table 6 APIs deprecated in Kubernetes v1.16

    NetworkPolicy
      Deprecated API version: extensions/v1beta1
      Substitute API version: networking.k8s.io/v1 (this API is available since v1.8)
      Change description: None

    DaemonSet
      Deprecated API versions: extensions/v1beta1, apps/v1beta2
      Substitute API version: apps/v1 (this API is available since v1.9)
      Change description:
      • The spec.templateGeneration field is deleted.
      • spec.selector is now a mandatory field and cannot be changed after the object is created. The label of an existing template can be used as a selector for seamless migration.
      • The default value of spec.updateStrategy.type is changed to RollingUpdate (the default value in the extensions/v1beta1 API version is OnDelete).

    Deployment
      Deprecated API versions: extensions/v1beta1, apps/v1beta1, apps/v1beta2
      Substitute API version: apps/v1 (this API is available since v1.9)
      Change description:
      • The spec.rollbackTo field is deleted.
      • spec.selector is now a mandatory field and cannot be changed after the Deployment is created. The label of an existing template can be used as a selector for seamless migration.
      • The default value of spec.progressDeadlineSeconds is changed to 600 seconds (the default value in extensions/v1beta1 is unlimited).
      • The default value of spec.revisionHistoryLimit is changed to 10. (In the apps/v1beta1 API version, the default value of this field is 2. In the extensions/v1beta1 API version, all historical records are retained by default.)
      • The default values of maxSurge and maxUnavailable are changed to 25%. (In the extensions/v1beta1 API version, these fields default to 1.)

    StatefulSet
      Deprecated API versions: apps/v1beta1, apps/v1beta2
      Substitute API version: apps/v1 (this API is available since v1.9)
      Change description:
      • spec.selector is now a mandatory field and cannot be changed after the StatefulSet is created. The label of an existing template can be used as a selector for seamless migration.
      • The default value of spec.updateStrategy.type is changed to RollingUpdate (the default value in the apps/v1beta1 API version is OnDelete).

    ReplicaSet
      Deprecated API versions: extensions/v1beta1, apps/v1beta1, apps/v1beta2
      Substitute API version: apps/v1 (this API is available since v1.9)
      Change description: spec.selector is now a mandatory field and cannot be changed after the object is created. The label of an existing template can be used as a selector for seamless migration.

    PodSecurityPolicy
      Deprecated API version: extensions/v1beta1
      Substitute API version: policy/v1beta1 (this API is available since v1.10)
      Change description: PodSecurityPolicy for the policy/v1beta1 API version will be removed in v1.25.

    Access mode

    Accessible only after being attached to ECSs and initialized.

    Mounted to ECSs using network protocols. A network address must be specified or mapped to a local directory for access.

    Supports the Network File System (NFS) protocol (NFSv3 only). You can seamlessly integrate existing applications and tools with SFS Turbo.

    Specifications

    Select node flavors as needed. A node needs at least two vCPU cores and 4 GiB of memory. For the supported node flavors, see Node Flavor Description.

    The available node flavors vary depending on AZs. Obtain the flavors displayed on the console.

    Table 1 Key parameters

    kubernetes.io/ingress.class
      Mandatory: Yes (only for clusters of v1.21 or earlier)
      Type: String
      Description: nginx: indicates that Nginx Ingress is used. This option is available only after the NGINX Ingress Controller add-on is installed.
      This parameter is mandatory when an ingress is created by calling the API.

    ingressClassName
      Mandatory: Yes (only for clusters of v1.23 or later)
      Type: String
      Description: nginx: indicates that Nginx Ingress is used. This option is available only after the NGINX Ingress Controller add-on is installed. If multiple Nginx Ingress controllers are installed in the cluster, replace nginx with the custom name of the controller associated with the ingress.
      Multiple NGINX Ingress Controller add-ons can be installed in one cluster if the add-on version is 2.5.4 or later. In this case, the value of this parameter must be the controller name customized during controller installation, which indicates that the ingress is managed by that controller.
      This parameter is mandatory when an ingress is created by calling the API.

    host
      Mandatory: No
      Type: String
      Description: Domain name for accessing the Service. By default, this parameter is left blank, and the domain name needs to be fully matched. Ensure that the domain name has been registered and archived. Once a domain name rule is configured, you must use the domain name for access.

    path
      Mandatory: Yes
      Type: String
      Description: User-defined route path. All external access requests must match host and path.
      NOTE:
      • The access path matching rule of Nginx Ingress is based on the path prefix separated by the slash (/) and is case-sensitive. If the subpath separated by a slash (/) matches the prefix, the access is normal. However, if the prefix is only a part of the character string in the subpath, the access is not matched. For example, if the URL is set to /healthz, /healthz/v1 is matched, but /healthzv1 is not matched.
      • The access path added here must exist in the backend application. Otherwise, the forwarding fails.
        For example, the default access URL of the Nginx application is /usr/share/nginx/html. When adding /test to the ingress forwarding policy, ensure that the access URL of your Nginx application contains /usr/share/nginx/html/test. Otherwise, error 404 will be returned.

    ingress.beta.kubernetes.io/url-match-mode
      Mandatory: No
      Type: String
      Description: Route matching policy.
      Default: STARTS_WITH (prefix match)
      Options:
      • EQUAL_TO: exact match
      • STARTS_WITH: prefix match

    pathType
      Mandatory: Yes
      Type: String
      Description: Path type. This field is supported only by clusters of v1.23 or later.
      • ImplementationSpecific: The matching method depends on the Ingress controller. The matching method defined by ingress.beta.kubernetes.io/url-match-mode is used in CCE.
      • Exact: exact matching of the URL, which is case-sensitive.
      • Prefix: prefix matching, which is case-sensitive. With this method, the URL path is separated into multiple elements by slashes (/) and the elements are matched one by one. If each element in the URL matches the path, the subpaths of the URL can be routed normally.
      NOTE:
      • During prefix matching, each element must be exactly matched. If the last element of the URL is a substring of the last element in the request path, no matching is performed. For example, /foo/bar matches /foo/bar/baz but does not match /foo/barbaz.
      • When elements are separated by slashes (/), if the URL or request path ends with a slash (/), the slash (/) at the end is ignored. For example, /foo/bar matches /foo/bar/.

    See examples of ingress path matching.
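    As a supplementary illustration (the Service names and port below are placeholders, not values from this document), the following rule set combines Prefix and Exact matching: requests to /api and /api/v1 match the first path, while only /healthz itself matches the second.

    apiVersion: networking.k8s.io/v1
    kind: Ingress
    metadata:
      name: path-match-demo              # placeholder name used only for illustration
    spec:
      ingressClassName: nginx
      rules:
        - http:
            paths:
              - path: /api               # /api and /api/v1 are routed; /apiv1 is not
                pathType: Prefix
                backend:
                  service:
                    name: api-svc        # placeholder Service name
                    port:
                      number: 8080       # placeholder Service port
              - path: /healthz           # only /healthz itself is routed
                pathType: Exact
                backend:
                  service:
                    name: health-svc     # placeholder Service name
                    port:
                      number: 8080       # placeholder Service port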


  • Create an ingress.

    kubectl create -f ingress-test.yaml

    If information similar to the following is displayed, the ingress has been created:

    ingress/ingress-test created

    View the created ingress.

    kubectl get ingress

    If information similar to the following is displayed, the ingress has been created and the workload is accessible:

    NAME             HOSTS     ADDRESS          PORTS   AGE
    ingress-test     *         121.**.**.**     80      10s

  • Enter http://121.**.**.**:80 in the address box of the browser to access the workload (for example, an Nginx workload).

    121.**.**.** indicates the IP address of the unified load balancer.
    + diff --git a/docs/cce/umn/cce_10_0378.html b/docs/cce/umn/cce_10_0378.html index 3cccf002..500cac23 100644 --- a/docs/cce/umn/cce_10_0378.html +++ b/docs/cce/umn/cce_10_0378.html @@ -170,8 +170,8 @@ metadata: everest.io/reclaim-policy: retain-volume-only name: pv-evs-test labels: - failure-domain.beta.kubernetes.io/region: <your_region> # Region of the node where the application is to be deployed - failure-domain.beta.kubernetes.io/zone: <your_zone> # AZ of the node where the application is to be deployed + failure-domain.beta.kubernetes.io/region: <your_region> # Region of the node where the application is to be deployed + failure-domain.beta.kubernetes.io/zone: <your_zone> # AZ of the node where the application is to be deployed spec: accessModes: - ReadWriteOnce diff --git a/docs/cce/umn/cce_10_0380.html b/docs/cce/umn/cce_10_0380.html index 8c643b79..f5ec058e 100644 --- a/docs/cce/umn/cce_10_0380.html +++ b/docs/cce/umn/cce_10_0380.html @@ -82,6 +82,9 @@ csi.storage.k8s.io/fstype: ext4 # (Optional) Set the file system type to

    EVS

    csi.storage.k8s.io/csi-driver-name

    Yes

    EVS disk type. All letters are in uppercase.
    • SATA: common I/O
    • SAS: high I/O
    • SSD: ultra-high I/O
    • GPSSD: general-purpose SSD
    • ESSD: extreme SSD
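    For illustration only, a StorageClass that requests one of these disk types might look like the following sketch. The provisioner and parameter keys reflect common CCE Everest StorageClass examples and should be verified against your cluster; the StorageClass name is a placeholder.

    apiVersion: storage.k8s.io/v1
    kind: StorageClass
    metadata:
      name: csi-disk-ssd                           # placeholder StorageClass name
    provisioner: everest-csi-provisioner           # Everest CSI provisioner used by CCE
    parameters:
      csi.storage.k8s.io/csi-driver-name: disk     # EVS (disk) driver
      csi.storage.k8s.io/fstype: ext4              # (Optional) file system type
      everest.io/disk-volume-type: SSD             # one of SATA, SAS, SSD, GPSSD, or ESSD
    reclaimPolicy: Delete
    volumeBindingMode: WaitForFirstConsumer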

    If the pod uses hostNetwork, the ELB forwards the request to the host network after this annotation is used.

    Options:
    • true: enabled
    • false (default): disabled

    v1.9 or later

    Table 1 Release notes for the v1.29 patch

    Columns: CCE Cluster Patch Version | Kubernetes Version | Feature Updates | Optimization | Vulnerability Fixing

    Version 1.28

    Table 2 Release notes for the v1.28 patch

    Columns: CCE Cluster Patch Version | Kubernetes Version | Feature Updates | Optimization | Vulnerability Fixing

    Version 1.27

    dockershim has been removed since Kubernetes v1.24, and Docker is not supported in v1.24 and later versions by default. Use containerd.

    Table 3 Release notes for the v1.27 patch

    Columns: CCE Cluster Patch Version | Kubernetes Version | Feature Updates | Optimization | Vulnerability Fixing

    Version 1.25

    All nodes in the CCE clusters of version 1.25, except the ones running EulerOS 2.5, use containerd by default.

    Table 4 Release notes for the v1.25 patch

    Columns: CCE Cluster Patch Version | Kubernetes Version | Feature Updates | Optimization | Vulnerability Fixing

    Version 1.23

    Table 5 Release notes for the v1.23 patch

    Columns: CCE Cluster Patch Version | Kubernetes Version | Feature Updates | Optimization | Vulnerability Fixing

    Version 1.21

    Table 6 Release notes for the v1.21 patch

    Columns: CCE Cluster Patch Version | Kubernetes Version | Feature Updates | Optimization | Vulnerability Fixing

    Version 1.19

    - @@ -132,7 +135,7 @@ csi.storage.k8s.io/fstype: ext4 # (Optional) Set the file system type to - diff --git a/docs/cce/umn/cce_bestpractice_0310.html b/docs/cce/umn/cce_bestpractice_0310.html index f63ce4ce..b2ee83dd 100644 --- a/docs/cce/umn/cce_bestpractice_0310.html +++ b/docs/cce/umn/cce_bestpractice_0310.html @@ -1,14 +1,14 @@

    Installing the Migration Tool

    Velero is an open-source backup and migration tool for Kubernetes clusters. With Restic's PV data backup capability integrated into it, Velero can back up Kubernetes resource objects (such as Deployments, jobs, Services, and ConfigMaps) in source clusters as well as data in PVs mounted to pods, and upload them to object storage. When a disaster occurs or migration is required, a target cluster can obtain the corresponding backup data from the object storage using Velero and restore cluster resources as required.

    According to Migration Solution, prepare temporary object storage to store backup files before the migration. Velero supports OBS or MinIO as the object storage. The object storage requires sufficient storage space for storing backup files. You can estimate the storage space based on your cluster scale and data volume. OBS buckets are recommended for data backup. For details about how to deploy Velero, see Installing Velero.
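    For orientation only, Velero records where backups are stored in a BackupStorageLocation resource. The following sketch assumes an S3-compatible bucket; the bucket name and endpoint are placeholders, not values from this document:

    apiVersion: velero.io/v1
    kind: BackupStorageLocation
    metadata:
      name: default
      namespace: velero
    spec:
      provider: aws                      # S3-compatible provider plugin, usable with OBS or MinIO
      objectStorage:
        bucket: velero-backup            # placeholder bucket name
      config:
        region: <your_region>            # placeholder region
        s3ForcePathStyle: "true"
        s3Url: https://<your_object_storage_endpoint>   # placeholder endpoint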

    Prerequisites

    • The Kubernetes version of the source on-premises cluster must be 1.10 or later, and the cluster can use DNS and Internet services properly.
    • If you use OBS to store backup files, obtain the AK/SK of a user who has the right to operate OBS. For details, see Access Keys.
    • If you use MinIO to store backup files, bind an EIP to the server where MinIO is installed and enable the API and console port of MinIO in the security group.
    • The target CCE cluster has been created.
    • The source cluster and target cluster must each have at least one idle node. It is recommended that the node specifications be 4 vCPUs and 8 GiB memory or higher.

    (Optional) Installing MinIO

    MinIO is an open-source, high-performance object storage tool compatible with the S3 API protocol. If MinIO is used to store backup files for cluster migration, you need a temporary server to deploy MinIO and provide services for external systems. If you use OBS to store backup files, skip this section and go to Installing Velero.

    MinIO can be installed in any of the following locations:

    • Temporary ECS outside the cluster

      If the MinIO server is installed outside the cluster, backup files will not be affected when a catastrophic fault occurs in the cluster.

    • Idle nodes in the cluster
      You can remotely log in to a node and install MinIO, or install the containerized MinIO. For details, see the Velero official documentation.

      For example, to install MinIO in a container, run the following command:

      • The storage type in the YAML file provided by Velero is emptyDir. You are advised to change the storage type to HostPath or Local. Otherwise, backup files will be permanently lost after the container is restarted.
      • Ensure that the MinIO service is accessible externally. Otherwise, backup files cannot be downloaded outside the cluster. You can change the Service type to NodePort or use other types of public network access Services.
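      The original container installation command is not preserved in this excerpt. As a rough sketch that follows the two notes above (the namespace, image tag, node path, ports, and credentials are all placeholders), a containerized MinIO could use a hostPath volume and a NodePort Service:

      apiVersion: apps/v1
      kind: Deployment
      metadata:
        name: minio
        namespace: velero                          # placeholder namespace
      spec:
        replicas: 1
        selector:
          matchLabels:
            app: minio
        template:
          metadata:
            labels:
              app: minio
          spec:
            containers:
              - name: minio
                image: minio/minio:latest          # placeholder image tag
                args: ["server", "/data", "--console-address", ":30840"]
                env:
                  - name: MINIO_ROOT_USER
                    value: minio                   # placeholder credential
                  - name: MINIO_ROOT_PASSWORD
                    value: minio123                # placeholder credential
                ports:
                  - containerPort: 9000            # API port
                  - containerPort: 30840           # console port
                volumeMounts:
                  - name: data
                    mountPath: /data
            volumes:
              - name: data
                hostPath:                          # hostPath instead of emptyDir so backups survive container restarts
                  path: /opt/miniodata
      ---
      apiVersion: v1
      kind: Service
      metadata:
        name: minio
        namespace: velero                          # placeholder namespace
      spec:
        type: NodePort                             # NodePort so backup files can be downloaded from outside the cluster
        selector:
          app: minio
        ports:
          - name: api
            port: 9000
            targetPort: 9000
          - name: console
            port: 30840
            targetPort: 30840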
      @@ -20,7 +20,7 @@ mkdir /opt/miniodata cd /opt/minio wget https://dl.minio.io/server/minio/release/linux-amd64/minio chmod +x minio -

    • Set the username and password of MinIO.

      The username and password configured using this method are temporary environment variables and must be reset after the service is restarted. Otherwise, the default root credential minioadmin:minioadmin will be used to create the service.
      export MINIO_ROOT_USER=minio
      export MINIO_ROOT_PASSWORD=minio123

    • Create a service. In the command, /opt/miniodata/ indicates the local disk path for MinIO to store data.

      The default API port of MinIO is 9000, and the console port is randomly generated. You can use the --console-address parameter to specify a console port.
      ./minio server /opt/miniodata/ --console-address ":30840" &
      diff --git a/docs/cce/umn/cce_bestpractice_0315.html b/docs/cce/umn/cce_bestpractice_0315.html index a7bf75c9..5301ac81 100644 --- a/docs/cce/umn/cce_bestpractice_0315.html +++ b/docs/cce/umn/cce_bestpractice_0315.html @@ -8,13 +8,13 @@
      diff --git a/docs/cce/umn/cce_bestpractice_0317.html b/docs/cce/umn/cce_bestpractice_0317.html index 94d95849..35c86b8c 100644 --- a/docs/cce/umn/cce_bestpractice_0317.html +++ b/docs/cce/umn/cce_bestpractice_0317.html @@ -1,6 +1,6 @@ -

      Suggestions on CCE Cluster Security Configuration

      +

      Configuration Suggestions on CCE Cluster Security

      For security purposes, you are advised to configure a cluster as follows.

      Using the CCE Cluster of the Latest Version

      Kubernetes releases a major version in about four months. CCE follows the same frequency as Kubernetes to release major versions. To be specific, a new CCE version is released about three months after a new Kubernetes version is released in the community. For example, Kubernetes v1.19 was released in September 2020 and CCE v1.19 was released in March 2021.

      The latest cluster version has known vulnerabilities fixed or provides a more comprehensive security protection mechanism. You are advised to select the latest cluster version when creating a cluster. Before a cluster version is deprecated and removed, upgrade your cluster to a supported version.

      diff --git a/docs/cce/umn/cce_bestpractice_0318.html b/docs/cce/umn/cce_bestpractice_0318.html index 2e7c3dc4..eacf61f7 100644 --- a/docs/cce/umn/cce_bestpractice_0318.html +++ b/docs/cce/umn/cce_bestpractice_0318.html @@ -1,6 +1,6 @@ -

      Suggestions on CCE Node Security Configuration

      +

      Configuration Suggestions on CCE Node Security

      Preventing Nodes from Being Exposed to Public Networks

      • Do not bind an EIP to a node unless necessary to reduce the attack surface.
      • If an EIP must be used, properly configure the firewall or security group rules to restrict access of unnecessary ports and IP addresses.

      You may have configured the kubeconfig.json file on a node in your cluster. kubectl can use the certificate and private key in this file to control the entire cluster. You are advised to delete unnecessary files from the /root/.kube directory on the node to prevent malicious use.

      rm -rf /root/.kube

      diff --git a/docs/cce/umn/cce_bestpractice_0319.html b/docs/cce/umn/cce_bestpractice_0319.html index e4241ef4..bab99a87 100644 --- a/docs/cce/umn/cce_bestpractice_0319.html +++ b/docs/cce/umn/cce_bestpractice_0319.html @@ -1,7 +1,9 @@ -

      Security Configuration Suggestions for Using Containers in CCE Clusters

      +

      Configuration Suggestions on CCE Container Security

      Controlling the Pod Scheduling Scope

      The nodeSelector or nodeAffinity is used to limit the range of nodes to which applications can be scheduled, preventing the entire cluster from being threatened due to the exceptions of a single application.
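      For example, a minimal nodeSelector sketch (the label key, value, and image are placeholders you would define for your own nodes and workloads):

      apiVersion: v1
      kind: Pod
      metadata:
        name: scoped-pod                 # placeholder name
      spec:
        nodeSelector:
          node-group: frontend           # placeholder node label; the pod is scheduled only to nodes carrying it
        containers:
          - name: app
            image: nginx:alpine          # placeholder image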

      +

      To achieve strong isolation, like in logical multi-tenancy situations, it is important to have system add-ons run on separate nodes or node pools. This helps keep them separated from service pods and reduces the risk of privilege escalation within a cluster. To do this, you can set the node affinity policy to either Node Affinity or Specified Node Pool Scheduling on the add-on installation page.

      +

      Suggestions on Container Security Configuration

      • Set the computing resource limits (request and limit) of a container, as shown in the sketch after this list. This prevents the container from occupying too many resources and affecting the stability of the host and other containers on the same node.
      • Unless necessary, do not mount sensitive host directories to containers, such as /, /boot, /dev, /etc, /lib, /proc, /sys, and /usr.
      • Do not run the sshd process in containers unless necessary.
      • Unless necessary, it is not recommended that containers and hosts share the network namespace.
      • Unless necessary, it is not recommended that containers and hosts share the process namespace.
      • Unless necessary, it is not recommended that containers and hosts share the IPC namespace.
      • Unless necessary, it is not recommended that containers and hosts share the UTS namespace.
      • Unless necessary, do not mount the sock file of Docker to any container.
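      A minimal sketch of the resource limits suggestion above (the container name, image, and values are placeholders):

      apiVersion: v1
      kind: Pod
      metadata:
        name: limited-pod                # placeholder name
      spec:
        containers:
          - name: app
            image: nginx:alpine          # placeholder image
            resources:
              requests:
                cpu: 250m                # the scheduler reserves this much CPU for the container
                memory: 256Mi
              limits:
                cpu: 500m                # CPU usage is throttled above this value
                memory: 512Mi            # the container is OOM-killed above this value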
      @@ -69,7 +71,7 @@ spec:
    • Container tunnel network:
      iptables -I FORWARD -s {container_cidr} -d {Private API server IP} -j REJECT

    {container_cidr} indicates the container CIDR of the cluster, for example, 10.0.0.0/16.

    To ensure configuration persistence, write the command to the /etc/rc.local script.

  • CCE Turbo cluster: Add an outbound rule to the ENI security group of the cluster.
    1. Log in to the VPC console.
    2. In the navigation pane, choose Access Control > Security Groups.
    3. Locate the ENI security group corresponding to the cluster and name it in the format of {Cluster name}-cce-eni-{Random ID}. Click the security group name and configure rules.
    4. Click the Outbound Rules tab and click Add Rule to add an outbound rule for the security group.
      • Priority: Set it to 1.
      • Action: Select Deny, indicating that the access to the destination address is denied.
      • Type: Select IPv4.
      • Protocol & Port: Enter 5443 based on the port in the intranet API server address.
      • Destination: Select IP address and enter the IP address of the internal API server.
    5. Click OK.
  • diff --git a/docs/cce/umn/cce_bestpractice_0320.html b/docs/cce/umn/cce_bestpractice_0320.html index 5f942aad..67673d48 100644 --- a/docs/cce/umn/cce_bestpractice_0320.html +++ b/docs/cce/umn/cce_bestpractice_0320.html @@ -1,6 +1,6 @@ -

    Security Configuration Suggestions for Using Secrets in CCE Clusters

    +

    Configuration Suggestions on CCE Secret Security

    Currently, CCE has configured static encryption for secret resources. The secrets created by users will be encrypted and stored in etcd of the CCE cluster. Secrets can be used in two modes: environment variable and file mounting. No matter which mode is used, CCE still transfers the configured data to users. Therefore, it is recommended that:

    1. Do not record sensitive information in logs.
    2. For the secret that uses the file mounting mode, the default file permission mapped in the container is 0644. Configure stricter permissions for the file. For example:
      apiVersion: v1
       kind: Pod
      @@ -46,7 +46,7 @@ spec:
           - name: secret-volume
             readOnly: true
             mountPath: "/etc/secret-volume"
      In this way, .secret-file cannot be seen by running ls -l in the /etc/secret-volume/ directory, but can be viewed by running ls -al.

    3. Encrypt sensitive information before creating a secret and decrypt the information when using it.

    Using a Bound ServiceAccount Token to Access a Cluster

    The secret-based ServiceAccount token does not support expiration time or auto update. In addition, after the mounting pod is deleted, the token is still stored in the secret. Token leakage may incur security risks. A bound ServiceAccount token is recommended for CCE clusters of version 1.23 or later. In this mode, the expiration time can be set and is the same as the pod lifecycle, reducing token leakage risks. Example:

    apiVersion: apps/v1
    @@ -92,7 +92,7 @@ spec:
                             apiVersion: v1
                             fieldPath: metadata.namespace
                           path: namespace
    For details, see Managing Service Accounts (https://kubernetes.io/docs/reference/access-authn-authz/service-accounts-admin/).
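    The Deployment example above is truncated in this excerpt. A minimal sketch of the same mechanism (the pod name, image, and audience are placeholders) projects a time-bound ServiceAccount token into the pod:

    apiVersion: v1
    kind: Pod
    metadata:
      name: bound-token-demo             # placeholder name
    spec:
      serviceAccountName: default
      containers:
        - name: app
          image: nginx:alpine            # placeholder image
          volumeMounts:
            - name: sa-token
              mountPath: /var/run/secrets/tokens
              readOnly: true
      volumes:
        - name: sa-token
          projected:
            sources:
              - serviceAccountToken:
                  path: token
                  expirationSeconds: 7200   # the token expires and is rotated by the kubelet
                  audience: demo-api        # placeholder audience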

    diff --git a/docs/cce/umn/cce_bestpractice_0350.html b/docs/cce/umn/cce_bestpractice_0350.html index 718ac4ff..16d4f175 100644 --- a/docs/cce/umn/cce_bestpractice_0350.html +++ b/docs/cce/umn/cce_bestpractice_0350.html @@ -1,7 +1,11 @@

    Avoiding Occasional DNS Resolution Timeout Caused by IPVS Defects

    Description

    When kube-proxy uses IPVS load balancing, you may occasionally encounter DNS resolution timeouts during CoreDNS scale-in or restart.

    This problem is caused by a Linux kernel defect. For details, see https://github.com/torvalds/linux/commit/35dfb013149f74c2be1ff9c78f14e6a3cd1539d1.

    Solution

    You can use NodeLocal DNSCache to minimize the impact of IPVS defects.
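    If your workloads do not pick up the local DNS address automatically, one commonly used upstream pattern is to point a workload's dnsConfig at the NodeLocal DNSCache address. The addresses and image below are assumptions and must be replaced with the values actually used in your cluster:

    apiVersion: v1
    kind: Pod
    metadata:
      name: dns-demo                  # placeholder name
    spec:
      dnsPolicy: "None"
      dnsConfig:
        nameservers:
          - 169.254.20.10             # commonly used NodeLocal DNSCache link-local address; confirm the address used by your add-on
          - 10.247.3.10               # placeholder cluster CoreDNS Service IP
        searches:
          - default.svc.cluster.local
          - svc.cluster.local
          - cluster.local
        options:
          - name: ndots
            value: "5"
      containers:
        - name: app
          image: nginx:alpine         # placeholder image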

    Table 7 Release notes for the v1.19 patch

    CCE Cluster Patch Version

    +
    - - - diff --git a/docs/cce/umn/cce_10_0435.html b/docs/cce/umn/cce_10_0435.html index 7991839a..8e317c66 100644 --- a/docs/cce/umn/cce_10_0435.html +++ b/docs/cce/umn/cce_10_0435.html @@ -1,9 +1,10 @@

    SSH Connectivity of Master Nodes

    -

    Check Items

    Check whether CCE can connect to your master nodes.

    +

    Check Items

    Check whether your master nodes can be accessed using SSH.

    -

    Solution

    Contact technical support.

    +

    Solution

    There is a low probability that the SSH connectivity check fails due to network fluctuations. Perform the pre-upgrade check again.

    +

    If the check still fails, submit a service ticket to contact technical support.

    diff --git a/docs/cce/umn/cce_10_0452.html b/docs/cce/umn/cce_10_0452.html index bdc317fb..6fa05bcb 100644 --- a/docs/cce/umn/cce_10_0452.html +++ b/docs/cce/umn/cce_10_0452.html @@ -1,9 +1,10 @@ -

    Node CPUs

    -

    Check Items

    Check whether the number of CPUs on the master node is greater than 2.

    +

    Node CPU Cores

    +

    Check Items

    Check and make sure that the master nodes in your cluster have more than 2 CPU cores.

    -

    Solution

    If the number of CPUs on the master node is 2, contact technical support to expand the number to 4 or more.

    +

    Solution

    The number of CPU cores on the master nodes is 2, which may lead to a cluster upgrade failure.

    +

    Contact technical support to expand the number of CPU cores to four or more.

    diff --git a/docs/cce/umn/cce_10_0458.html b/docs/cce/umn/cce_10_0458.html index f926d8c3..82a7ccba 100644 --- a/docs/cce/umn/cce_10_0458.html +++ b/docs/cce/umn/cce_10_0458.html @@ -1,9 +1,10 @@ -

    Internal Errors

    -

    Check Items

    Before the upgrade, check whether an internal error occurs.

    +

    Internal Error

    +

    Check Items

    This check item is not typical and implies that an internal error was found during the pre-upgrade check.

    -

    Solution

    If this check fails, contact technical support.

    +

    Solution

    Perform the pre-upgrade check again.

    +

    If it fails again, submit a service ticket to contact technical support.

    diff --git a/docs/cce/umn/cce_10_0476.html b/docs/cce/umn/cce_10_0476.html index d844384f..0fcb5df1 100644 --- a/docs/cce/umn/cce_10_0476.html +++ b/docs/cce/umn/cce_10_0476.html @@ -43,7 +43,7 @@
    - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + - - - - - - - - - - - - - - - - diff --git a/docs/cce/umn/cce_10_0550.html b/docs/cce/umn/cce_10_0550.html index 2dfbbe7a..634e672c 100644 --- a/docs/cce/umn/cce_10_0550.html +++ b/docs/cce/umn/cce_10_0550.html @@ -47,7 +47,7 @@ - @@ -57,7 +57,7 @@ - @@ -65,7 +65,7 @@ - @@ -117,6 +117,8 @@ +
    Table 7 Release notes for the v1.19 patch

    CCE Cluster Patch Version

    Kubernetes Version

    +

    Kubernetes Version

    Feature Updates

    +

    Feature Updates

    Optimization

    +

    Optimization

    Vulnerability Fixing

    √

    5.10.0-60.18.0.50.r865_35.hce2.x86_64

    +

    5.10.0-60.18.0.50.r865_35.hce2.x86_64

    v1.27

    @@ -54,7 +54,7 @@

    √

    5.10.0-60.18.0.50.r865_35.hce2.x86_64

    +

    5.10.0-60.18.0.50.r865_35.hce2.x86_64

    v1.25

    @@ -65,7 +65,7 @@

    √

    5.10.0-60.18.0.50.r865_35.hce2.x86_64

    +

    5.10.0-60.18.0.50.r865_35.hce2.x86_64

    Ubuntu 22.04

    @@ -78,7 +78,7 @@

    √

    5.15.0-53-generic

    +

    5.15.0-53-generic

    v1.28

    @@ -89,7 +89,7 @@

    √

    5.15.0-53-generic

    +

    5.15.0-53-generic

    v1.27

    @@ -100,7 +100,7 @@

    √

    5.15.0-53-generic

    +

    5.15.0-53-generic

    v1.25

    @@ -111,7 +111,7 @@

    √

    5.15.0-53-generic

    +

    5.15.0-53-generic

    EulerOS release 2.9

    @@ -124,7 +124,7 @@

    √

    4.18.0-147.5.1.6.h998.eulerosv2r9.x86_64

    +

    4.18.0-147.5.1.6.h998.eulerosv2r9.x86_64

    v1.28

    @@ -135,7 +135,7 @@

    √

    4.18.0-147.5.1.6.h998.eulerosv2r9.x86_64

    +

    4.18.0-147.5.1.6.h998.eulerosv2r9.x86_64

    v1.27

    @@ -146,7 +146,7 @@

    √

    4.18.0-147.5.1.6.h998.eulerosv2r9.x86_64

    +

    4.18.0-147.5.1.6.h998.eulerosv2r9.x86_64

    v1.25

    @@ -157,7 +157,7 @@

    √

    4.18.0-147.5.1.6.h998.eulerosv2r9.x86_64

    +

    4.18.0-147.5.1.6.h998.eulerosv2r9.x86_64

    v1.23

    @@ -168,7 +168,7 @@

    √

    4.18.0-147.5.1.6.h998.eulerosv2r9.x86_64

    +

    4.18.0-147.5.1.6.h998.eulerosv2r9.x86_64

    v1.21

    @@ -179,7 +179,7 @@

    √

    4.18.0-147.5.1.6.h998.eulerosv2r9.x86_64

    +

    4.18.0-147.5.1.6.h998.eulerosv2r9.x86_64

    v1.19

    @@ -190,7 +190,7 @@

    √

    4.18.0-147.5.1.6.h998.eulerosv2r9.x86_64

    +

    4.18.0-147.5.1.6.h998.eulerosv2r9.x86_64

    EulerOS release 2.5

    diff --git a/docs/cce/umn/cce_10_0479.html b/docs/cce/umn/cce_10_0479.html index 08e91a1e..a7e7f310 100644 --- a/docs/cce/umn/cce_10_0479.html +++ b/docs/cce/umn/cce_10_0479.html @@ -1,9 +1,10 @@ -

    cce-hpa-controller Restrictions

    -

    Check Items

    Check whether the current cce-controller-hpa add-on has compatibility restrictions.

    +

    cce-hpa-controller Limitations

    +

    Check Items

    Check whether there are compatibility limitations between the current and target cce-controller-hpa add-on versions.

    -

    Solution

    The current cce-controller-hpa add-on has compatibility restrictions. An add-on that can provide metric APIs, for example, metric-server, must be installed in the cluster.

    +

    Solution

    There are compatibility limitations between the current and target versions of the cce-controller-hpa add-on. To address this, install an add-on that provides metrics APIs, like metrics-server, in the cluster.

    +

    Install the metrics add-on in the cluster and try again.

    diff --git a/docs/cce/umn/cce_10_0484.html b/docs/cce/umn/cce_10_0484.html index 20dfabc9..f433025f 100644 --- a/docs/cce/umn/cce_10_0484.html +++ b/docs/cce/umn/cce_10_0484.html @@ -1,9 +1,13 @@

    Health of Worker Node Components

    -

    Check Items

    Check whether the container runtime and network components on the worker nodes are healthy.

    +

    Check Items

    Check whether the container runtime and network components on the worker nodes are healthy.

    -

    Solution

    If a worker node component malfunctions, log in to the node to check the status of the component and rectify the fault.

    +

    Solution

    • Issue 1: CNI Agent is not active.

      If your cluster version is earlier than v1.17.17, or you are using the underlay_ipvlan network model and your cluster version is later than v1.17.17, log in to the node and run the systemctl status canal command to check the status of canal. If you encounter an error, run the systemctl restart canal command and check the status again.

      +

      If you are using the VPC or Cloud Native 2.0 network model and your cluster version is later than v1.17.17, log in to the node and run the systemctl status yangtse command to check the status of Yangtse. If you encounter an error, run the systemctl restart yangtse command and check the status again.

      +
    • Issue 2: kube-proxy is not active.

      Log in to the node and run the systemctl is-active kube-proxy command to check the status of kube-proxy. If you encounter an error, run the systemctl restart kube-proxy command and check the status again.

      +

      If the fault persists, reset the node. For details, see Resetting a Node.

      +
    diff --git a/docs/cce/umn/cce_10_0485.html b/docs/cce/umn/cce_10_0485.html index b40f36cf..286d2299 100644 --- a/docs/cce/umn/cce_10_0485.html +++ b/docs/cce/umn/cce_10_0485.html @@ -1,9 +1,10 @@

    Health of Master Node Components

    -

    Check Items

    Check whether the Kubernetes, container runtime, and network components of the master nodes are healthy.

    +

    Check Items

    Check whether cluster components such as the Kubernetes component, container runtime component, and network component are running properly before the upgrade.

    -

    Solution

    If a master node component malfunctions, contact technical support.

    +

    Solution

    Perform the pre-upgrade check again.

    +

    If it fails again, submit a service ticket to contact technical support.

    diff --git a/docs/cce/umn/cce_10_0501.html b/docs/cce/umn/cce_10_0501.html index 3761d95b..a946b8f7 100644 --- a/docs/cce/umn/cce_10_0501.html +++ b/docs/cce/umn/cce_10_0501.html @@ -1,9 +1,10 @@

    Historical Upgrade Records

    -

    Check Items

    Check whether the source version of the cluster is earlier than v1.11 and the target version is later than v1.23.

    +

    Check Items

    Check the historical upgrade records of the cluster and confirm that the current version of the cluster meets the requirements for upgrading to the target version.

    -

    Solution

    If the source version of the cluster is earlier than v1.11, it is risky to upgrade the cluster to a version later than v1.23. In this case, contact technical support.

    +

    Solution

    Upgrading your cluster from an earlier version can be risky and may result in this issue. To avoid this, it is recommended that you migrate the cluster beforehand.

    +

    If you still want to proceed with the cluster upgrade, submit a service ticket to contact technical support for evaluation.

    diff --git a/docs/cce/umn/cce_10_0502.html b/docs/cce/umn/cce_10_0502.html index 61d533cd..2f20676a 100644 --- a/docs/cce/umn/cce_10_0502.html +++ b/docs/cce/umn/cce_10_0502.html @@ -1,9 +1,10 @@

    CIDR Block of the Cluster Management Plane

    -

    Check Items

    Check whether the CIDR block of the cluster management plane is the same as that configured on the backbone network.

    +

    Check Items

    Check whether the CIDR block of the cluster management plane is the same as that configured on the backbone network.

    -

    Solution

    If the CIDR block of the cluster management plane is different from that configured on the backbone network, contact technical support.

    +

    Solution

    The CIDR block of the management plane has been modified in your region. As a result, the CIDR block of the management plane and that of the backbone network are inconsistent.

    +

    Submit a service ticket to contact technical support to modify the settings, and recheck the configurations.

    diff --git a/docs/cce/umn/cce_10_0504.html b/docs/cce/umn/cce_10_0504.html index 6c49978c..727c11e6 100644 --- a/docs/cce/umn/cce_10_0504.html +++ b/docs/cce/umn/cce_10_0504.html @@ -1,10 +1,10 @@

    Nodes' System Parameters

    -

    Check Items

    Check whether the default system parameter settings on your nodes are modified.

    +

    Check Items

    Check whether the default system parameter settings on your nodes are modified.

    -

    Solution

    If the MTU value of the bond0 network on your BMS node is not the default value 1500, this check item failed.

    -

    Non-default parameter settings may lead to service packet loss. Change them back to the default values.

    +

    Solution

    If the MTU value of the bond0 network on your BMS node is not the default value 1500, this check item failed.

    +

    Non-default parameter settings may lead to service packet loss. Change them back to the default values.

    diff --git a/docs/cce/umn/cce_10_0507.html b/docs/cce/umn/cce_10_0507.html index 989f687d..d9e15178 100644 --- a/docs/cce/umn/cce_10_0507.html +++ b/docs/cce/umn/cce_10_0507.html @@ -1,9 +1,10 @@

    Node Swap

    -

    Check Items

    Check whether swap has been enabled on cluster nodes.

    +

    Check Items

    Check whether swap has been enabled on CCE nodes.

    -

    Solution

    By default, swap is disabled on CCE nodes. Check the necessity of enabling swap manually and determine the impact of disabling this function. Run the swapoff -a command to disable swap.

    +

    Solution

    By default, swap is disabled on CCE nodes. Check the necessity of enabling swap manually and determine the impact of disabling this function.

    +

    If there is no impact, run the swapoff -a command to disable swap and perform the check again.

    diff --git a/docs/cce/umn/cce_10_0508.html b/docs/cce/umn/cce_10_0508.html new file mode 100644 index 00000000..769e5231 --- /dev/null +++ b/docs/cce/umn/cce_10_0508.html @@ -0,0 +1,75 @@ + + +

    nginx-ingress Upgrade

    +

    Check Items

    • Check item 1: Check whether there is an Nginx Ingress route whose ingress type is not specified (kubernetes.io/ingress.class: nginx is not added to annotations) in the cluster.
    • Check item 2: Check whether the DefaultBackend Service specified by the Nginx Ingress Controller backend is available.
    +
    +

    Fault Locating

    For Check Item 1

    +

    For Nginx Ingress, check the YAML. If the ingress type is not specified in the YAML file and the ingress is managed by the Nginx Ingress Controller, the ingress is at risk.

    +
    1. Check the Ingress type.

      Run the following command:
      kubectl get ingress <ingress-name> -oyaml | grep -E ' kubernetes.io/ingress.class: | ingressClassName:'
      +
      • Fault scenario: If the command output is empty, the Ingress type is not specified.
      • Normal scenario: The command output is not empty, indicating that the Ingress type has been specified by annotations or ingressClassName.

        +
      +
      +

    2. Ensure that the Ingress is managed by the Nginx Ingress Controller. The LoadBalancer Ingresses are not affected by this issue.

      • For clusters of v1.19, confirm this issue using managedFields.
        kubectl get ingress <ingress-name> -oyaml | grep 'manager: nginx-ingress-controller'
        +

        +
      • For clusters of other versions, check the logs of the Nginx Ingress Controller pod.
         kubectl logs -nkube-system cceaddon-nginx-ingress-controller-545db6b4f7-bv74t | grep 'updating Ingress status'
        +

        +
      +

      If the fault persists, contact technical support personnel.

      +

    +

    For Check Item 2

    +
    1. View the DefaultBackend Service in the namespace where the Nginx Ingress Controller is deployed.

      kubectl get pod cceaddon-nginx-ingress-<controller-name>-controller-*** -n <namespace> -oyaml | grep 'default-backend'
      +

      In the preceding command, cceaddon-nginx-ingress-<controller-name>-controller-*** is the controller pod name, <controller-name> is the controller name specified during add-on installation, and <namespace> is the namespace where the controller is deployed.

      +

      Command output:

      +
      - '--default-backend-service=<namespace>/<backend-svc-name>'
      +

      In the preceding command, <backend-svc-name> is the name of the DefaultBackend Service for the Nginx Ingress Controller.

      +

    2. Check whether the DefaultBackend Service of the Nginx Ingress Controller is available.

      kubectl get svc <backend-svc-name> -n <namespace>
      +

      If the Service is unavailable, this check item failed.

      +

    +
    +

    Solution

    For Check Item 1

    +

    Add an annotation to the Nginx ingresses as follows:

    +
    kubectl annotate ingress <ingress-name> kubernetes.io/ingress.class=nginx
    +

    There is no need to add this annotation to LoadBalancer ingresses. Verify that these ingresses are managed by Nginx Ingress Controller.

    +
    +

    For Check Item 2

    +
    Create the DefaultBackend Service again.
    • If a custom DefaultBackend Service has been specified in the default 404 service configuration during add-on installation, create the same Service.
    • If the default DefaultBackend Service is used during add-on installation, the re-created YAML example is as follows:
      apiVersion: v1
      kind: Service
      metadata:
        name: cceaddon-nginx-ingress-<controller-name>-default-backend   # <controller-name> is the controller name.
        namespace: kube-system
        labels:
          app: nginx-ingress-<controller-name>
          app.kubernetes.io/managed-by: Helm
          chart: nginx-ingress-<version>    # <version> is the add-on version.
          component: default-backend
          heritage: Helm
          release: cceaddon-nginx-ingress-<controller-name>
        annotations:
          meta.helm.sh/release-name: cceaddon-nginx-ingress-<controller-name>
          meta.helm.sh/release-namespace: kube-system    # Namespace where the add-on is installed
      spec:
        ports:
          - name: http
            protocol: TCP
            port: 80
            targetPort: http
        selector:
          app: nginx-ingress-<controller-name>
          component: default-backend
          release: cceaddon-nginx-ingress-<controller-name>
        type: ClusterIP
        sessionAffinity: None
        ipFamilies:
          - IPv4
        ipFamilyPolicy: SingleStack
        internalTrafficPolicy: Cluster
    + + diff --git a/docs/cce/umn/cce_10_0510.html b/docs/cce/umn/cce_10_0510.html index c7285c82..be7e26c9 100644 --- a/docs/cce/umn/cce_10_0510.html +++ b/docs/cce/umn/cce_10_0510.html @@ -1,9 +1,10 @@

    containerd Pod Restart Risks

    -

    Check Items

    Check whether the service pods running on a containerd node are restarted when containerd is upgraded.

    +

    Check Items

    Check whether the service pods running on a containerd node are restarted when containerd is upgraded.

    -

    Solution

    Upgrade the cluster when the impact on services is controllable (for example, during off-peak hours) to minimize the impact. If you need help, contact O&M personnel.

    +

    Solution

    containerd on your node may need to be restarted. To minimize the impact on service containers, upgrade the cluster during controllable times, such as off-peak hours.

    +

    If you need help, submit a service ticket to contact O&M personnel.

    diff --git a/docs/cce/umn/cce_10_0512.html b/docs/cce/umn/cce_10_0512.html index bdf15700..8a158418 100644 --- a/docs/cce/umn/cce_10_0512.html +++ b/docs/cce/umn/cce_10_0512.html @@ -1,9 +1,10 @@

    GPU Pod Rebuild Risks

    -

    Check Items

    Check whether GPU service pods are rebuilt in a cluster when kubelet is restarted during the upgrade of the cluster.

    +

    Check Items

    Check whether GPU service pods are rebuilt in a cluster when kubelet is restarted during the upgrade of the cluster.

    -

    Solution

    Upgrade the cluster when the impact on services is controllable (for example, during off-peak hours) to minimize the impact. If you need help, contact O&M personnel.

    +

    Solution

    Upgrade the cluster when the impact on services is controllable (for example, during off-peak hours) to minimize the impact.

    +

    If you need help, submit a service ticket to contact O&M personnel.

    diff --git a/docs/cce/umn/cce_10_0513.html b/docs/cce/umn/cce_10_0513.html index 79f66d7f..f8716daa 100644 --- a/docs/cce/umn/cce_10_0513.html +++ b/docs/cce/umn/cce_10_0513.html @@ -1,9 +1,10 @@

    ELB Listener Access Control

    -

    Check Items

    Check whether the access control of the ELB listener has been configured for the Service in the current cluster using annotations and whether the configurations are correct.

    +

    Check Items

    Check whether ELB listener access control has been configured for the Services in the current cluster using annotations.

    +

    If so, check whether their configurations are correct.

    -

    Solution

    In case of an incorrect configuration, contact O&M personnel.

    +

    Solution

    In case of an incorrect configuration, contact O&M personnel.

    diff --git a/docs/cce/umn/cce_10_0514.html b/docs/cce/umn/cce_10_0514.html index 2faf54b5..4d986057 100644 --- a/docs/cce/umn/cce_10_0514.html +++ b/docs/cce/umn/cce_10_0514.html @@ -1,9 +1,10 @@

    Master Node Flavor

    -

    Check Items

    Check whether the flavor of the master nodes in the cluster is the same as the actual flavor of these nodes.

    +

    Check Items

    Check whether the flavor of the master nodes in the cluster is the same as the actual flavor of these nodes.

    -

    Solution

    Flavor inconsistency is typically due to a modification made on the master nodes. After the cluster is upgraded, the modification of the master nodes may be restored. If the impact of the restoration cannot be evaluated, contact O&M personnel.

    +

    Solution

    This issue is typically caused by modifications made to the master node. This upgrade may reset the node.

    +

    If you are unsure about the impact, submit a service ticket to contact O&M personnel.

    diff --git a/docs/cce/umn/cce_10_0515.html b/docs/cce/umn/cce_10_0515.html index fcd12deb..ee09f815 100644 --- a/docs/cce/umn/cce_10_0515.html +++ b/docs/cce/umn/cce_10_0515.html @@ -1,9 +1,10 @@

    Subnet Quota of Master Nodes

    -

    Check Items

    Check whether the number of available IP addresses in the cluster subnet supports rolling upgrade.

    +

    Check Items

    Check whether the number of available IP addresses in the cluster subnet supports rolling upgrade.

    -

    Solution

    If the number of IP addresses in the selected cluster subnet is insufficient, rolling upgrade is not supported. Contact O&M personnel for support.

    +

    Solution

    Rolling upgrade is not supported if there are not enough IP addresses in the selected cluster subnet.

    +

    Move nodes out of the target subnet and check again. If you are unsure about the impact of migration, submit a service ticket to contact O&M personnel.

    diff --git a/docs/cce/umn/cce_10_0516.html b/docs/cce/umn/cce_10_0516.html index 0deb1f01..1ad89ffa 100644 --- a/docs/cce/umn/cce_10_0516.html +++ b/docs/cce/umn/cce_10_0516.html @@ -1,9 +1,10 @@

    Node Runtime

    -

    Check Items

    Check whether an alarm is generated when a cluster is upgraded to v1.27 or later. Do not use Docker in clusters of versions later than 1.27.

    +

    Check Items

    Check whether an alarm is generated when a cluster is upgraded to v1.27 or later. Do not use Docker in clusters of versions later than 1.27.

    -

    Solution

    If the runtime on your node is not containerd, change the runtime of the node to containerd by resetting the node.

    +

    Solution

    If your node's runtime is not containerd, change it to containerd by resetting the node.

    +

    diff --git a/docs/cce/umn/cce_10_0517.html b/docs/cce/umn/cce_10_0517.html index 70d6dd89..1413271c 100644 --- a/docs/cce/umn/cce_10_0517.html +++ b/docs/cce/umn/cce_10_0517.html @@ -1,9 +1,10 @@

    Node Pool Runtime

    -

    Check Items

    Check whether an alarm is generated when a cluster is upgraded to v1.27 or later. Do not use Docker in clusters of versions later than 1.27.

    +

    Check Items

    Check whether an alarm is generated when a cluster is upgraded to v1.27 or later. Do not use Docker in clusters of versions later than 1.27.

    -

    Solution

    If the runtime on your node pool is not containerd, change the runtime of the node pool to containerd by updating the node pool.

    +

    Solution

    If your node pool's runtime is not containerd, change it to containerd by updating the node pool.

    +

    diff --git a/docs/cce/umn/cce_10_0518.html b/docs/cce/umn/cce_10_0518.html index 69d68a69..8421e84e 100644 --- a/docs/cce/umn/cce_10_0518.html +++ b/docs/cce/umn/cce_10_0518.html @@ -1,9 +1,10 @@

    Number of Node Images

    -

    Check Items

    Check the number of images on your node. If there are more than 1000 images, it takes a long time for Docker to start, affecting the standard Docker output and functions such as Nginx.

    +

    Check Items

    Check the number of images on your node. If there are more than 1000 images, it takes a long time for Docker to start, affecting the standard Docker output and functions such as Nginx.

    -

    Solution

    Manually delete residual images.

    +

    Solution

    Manually delete residual images.

    +

    Perform the pre-upgrade check again.

    diff --git a/docs/cce/umn/cce_10_0549.html b/docs/cce/umn/cce_10_0549.html index ecfb323a..6e1f13ad 100644 --- a/docs/cce/umn/cce_10_0549.html +++ b/docs/cce/umn/cce_10_0549.html @@ -43,7 +43,7 @@

    SSH Connectivity of Master Nodes

    Check whether CCE can connect to your master nodes.

    +

    Check whether your master nodes can be accessed using SSH.

    6

    @@ -153,9 +153,9 @@

    21

    Node CPUs

    +

    Node CPU Cores

    Check whether the number of CPUs on the master node is greater than 2.

    +

    Check and make sure that the master nodes in your cluster have more than 2 CPU cores.

    22

    @@ -188,9 +188,9 @@

    26

    Internal Errors

    +

    Internal Error

    Before the upgrade, check whether an internal error occurs.

    +

    This check item is not typical and implies that an internal error was found during the pre-upgrade check.

    27

    @@ -216,9 +216,9 @@

    30

    cce-hpa-controller Restrictions

    +

    cce-hpa-controller Limitations

    Check whether the current cce-controller-hpa add-on has compatibility restrictions.

    +

    Check whether there are compatibility limitations between the current and target cce-controller-hpa add-on versions.

    31

    @@ -232,14 +232,14 @@

    Health of Worker Node Components

    Check whether the container runtime and network components on the worker nodes are healthy.

    +

    Check whether the container runtime and network components on the worker nodes are healthy.

    33

    Health of Master Node Components

    Check whether the Kubernetes, container runtime, and network components of the master nodes are healthy.

    +

    Check whether cluster components such as the Kubernetes component, container runtime component, and network component are running properly before the upgrade.

    34

    @@ -353,14 +353,14 @@

    Historical Upgrade Records

    Check whether the source version of the cluster is earlier than v1.11 and the target version is later than v1.23.

    +

    Check the historical upgrade records of the cluster and confirm that the current version of the cluster meets the requirements for upgrading to the target version.

    50

    CIDR Block of the Cluster Management Plane

    Check whether the CIDR block of the cluster management plane is the same as that configured on the backbone network.

    +

    Check whether the CIDR block of the cluster management plane is the same as that configured on the backbone network.

    51

    @@ -374,7 +374,7 @@

    Nodes' System Parameters

    Check whether the default system parameter settings on your nodes are modified.

    +

    Check whether the default system parameter settings on your nodes are modified.

    53

    @@ -398,67 +398,74 @@

    Check whether swap has been enabled on cluster nodes.

    56

    +

    56

    +

    nginx-ingress Upgrade

    +

    Check whether there are compatibility issues that may occur during nginx-ingress upgrade.

    +

    57

    containerd Pod Restart Risks

    Check whether the service pods running on a containerd node are restarted when containerd is upgraded.

    +

    Check whether the service pods running on a containerd node are restarted when containerd is upgraded.

    57

    +

    58

    Key GPU Add-on Parameters

    Check whether the configuration of the CCE AI Suite add-on in a cluster has been intrusively modified. If so, upgrading the cluster may fail.

    58

    +

    59

    GPU Pod Rebuild Risks

    Check whether GPU service pods are rebuilt in a cluster when kubelet is restarted during the upgrade of the cluster.

    +

    Check whether GPU service pods are rebuilt in a cluster when kubelet is restarted during the upgrade of the cluster.

    59

    +

    60

    ELB Listener Access Control

    Check whether the access control of the ELB listener has been configured for the Service in the current cluster using annotations and whether the configurations are correct.

    +

    If access control is configured, check whether the configurations are correct.

    60

    +

    61

    Master Node Flavor

    Check whether the flavor of the master nodes in the cluster is the same as the actual flavor of these nodes.

    +

    Check whether the flavor of the master nodes in the cluster is the same as the actual flavor of these nodes.

    61

    +

    62

    Subnet Quota of Master Nodes

    Check whether the number of available IP addresses in the cluster subnet supports rolling upgrade.

    +

    Check whether the number of available IP addresses in the cluster subnet supports rolling upgrade.

    62

    +

    63

    Node Runtime

    Check whether an alarm is generated when a cluster is upgraded to v1.27 or later. Do not use Docker in clusters of versions later than 1.27.

    +

    Check whether an alarm is generated when a cluster is upgraded to v1.27 or later. Do not use Docker in clusters of versions later than 1.27.

    63

    +

    64

    Node Pool Runtime

    Check whether an alarm is generated when a cluster is upgraded to v1.27 or later. Do not use Docker in clusters of versions later than 1.27.

    +

    Check whether an alarm is generated when a cluster is upgraded to v1.27 or later. Do not use Docker in clusters of versions later than 1.27.

    64

    +

    65

    Number of Node Images

    Check the number of images on your node. If there are more than 1000 images, it takes a long time for Docker to start, affecting the standard Docker output and functions such as Nginx.

    +

    Check the number of images on your node. If there are more than 1000 images, it takes a long time for Docker to start, affecting the standard Docker output and functions such as Nginx.

diff --git a/docs/cce/umn/cce_10_0614.html b/docs/cce/umn/cce_10_0614.html index 01a92985..bde20471 100644 --- a/docs/cce/umn/cce_10_0614.html +++ b/docs/cce/umn/cce_10_0614.html @@ -2,7 +2,7 @@

    Using an Existing EVS Disk Through a Static PV

    CCE allows you to create a PV using an existing EVS disk. After the PV is created, you can create a PVC and bind it to the PV. This mode applies if the underlying storage is available.

    -

    Prerequisites

    • You have created a cluster and installed the CCE Container Storage (Everest) add-on in the cluster.
    • You have created an EVS disk that meets the following requirements:
      • The EVS disk cannot be a system disk, DSS disk, or shared disk.
      • The EVS disk must be of the SCSI type (the default disk type is VBD when you create an EVS disk).
      • The EVS disk must be available and not used by other resources.
      • The AZ of the EVS disk must be the same as that of the cluster node. Otherwise, the EVS disk cannot be mounted and the pod cannot start.
      • If the EVS disk is encrypted, the key must be available.
      • EVS disks that have been partitioned are not supported.
      +

      Prerequisites

      • You have created a cluster and installed the CCE Container Storage (Everest) add-on in the cluster.
      • You have created an EVS disk that meets the following requirements:
        • The EVS disk cannot be a system disk or shared disk.
        • The EVS disk must be of the SCSI type (the default disk type is VBD when you create an EVS disk).
        • The EVS disk must be available and not used by other resources.
        • The AZ of the EVS disk must be the same as that of the cluster node. Otherwise, the EVS disk cannot be mounted and the pod cannot start.
        • If the EVS disk is encrypted, the key must be available.
        • EVS disks that have been partitioned are not supported.
      • To create a cluster using commands, ensure kubectl is used. For details, see Connecting to a Cluster Using kubectl.

      Notes and Constraints

      • EVS disks cannot be attached across AZs and cannot be used by multiple workloads, multiple pods of the same workload, or multiple tasks. Data sharing of a shared disk is not supported between nodes in a CCE cluster. If an EVS disk is attached to multiple nodes, I/O conflicts and data cache conflicts may occur. Therefore, select only one pod when creating a Deployment that uses EVS disks.
      • For clusters earlier than v1.19.10, if an HPA policy is used to scale out a workload with EVS volumes mounted, the existing pods cannot be read or written when a new pod is scheduled to another node.

        For clusters of v1.19.10 and later, if an HPA policy is used to scale out a workload with EVS volumes mounted, a new pod cannot be started because EVS disks cannot be attached.
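For reference, a minimal sketch of what a static PV for an existing EVS disk typically looks like with the Everest CSI driver. The driver name disk.csi.everest.io, the everest.io/disk-mode attribute, and the csi-disk storage class are assumptions based on the Everest conventions shown elsewhere in this guide; replace the placeholders with the actual ID and size of your disk.

    apiVersion: v1
    kind: PersistentVolume
    metadata:
      annotations:
        pv.kubernetes.io/provisioned-by: everest-csi-provisioner
      name: pv-evs
    spec:
      accessModes:
        - ReadWriteOnce                      # An EVS disk can be mounted by a single node only.
      capacity:
        storage: 10Gi                        # Must match the size of the existing disk.
      csi:
        driver: disk.csi.everest.io          # Assumption: Everest CSI driver name for EVS.
        fsType: ext4
        volumeHandle: <your_volume_id>       # ID of the existing EVS disk.
        volumeAttributes:
          everest.io/disk-mode: SCSI         # Assumption: the disk must be of the SCSI type (see the prerequisites above).
          storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
      persistentVolumeReclaimPolicy: Retain
      storageClassName: csi-disk             # Assumption: storage class used for EVS volumes.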

        @@ -190,7 +190,7 @@ spec:
diff --git a/docs/cce/umn/cce_10_0615.html b/docs/cce/umn/cce_10_0615.html index 8872ceb6..5e7644dd 100644 --- a/docs/cce/umn/cce_10_0615.html +++ b/docs/cce/umn/cce_10_0615.html @@ -138,7 +138,7 @@ metadata:
      everest.io/disk-volume-tags: '{"key1":"value1","key2":"value2"}'    # (Optional) Custom resource tags
      csi.storage.k8s.io/fstype: xfs    # (Optional) The file system is of the xfs type. If it is left blank, ext4 will be used by default.
-     everest.io/csi.volume-name-prefix: test    # (Optional) PV name prefix of the automatically created underlying storage
+     everest.io/csi.volume-name-prefix: test    # (Optional) PV name prefix of the automatically created underlying storage
    labels:
      failure-domain.beta.kubernetes.io/region: <your_region>    # Region of the node where the application is to be deployed
      failure-domain.beta.kubernetes.io/zone: <your_zone>    # AZ of the node where the application is to be deployed
@@ -178,7 +178,7 @@ spec:
diff --git a/docs/cce/umn/cce_10_0616.html b/docs/cce/umn/cce_10_0616.html index 84fa6e9b..b0fc8504 100644 --- a/docs/cce/umn/cce_10_0616.html +++ b/docs/cce/umn/cce_10_0616.html @@ -148,7 +148,7 @@ spec:
      everest.io/disk-volume-tags: '{"key1":"value1","key2":"value2"}'    # (Optional) Custom resource tags
      csi.storage.k8s.io/fstype: xfs    # (Optional) The file system is of the xfs type. If it is left blank, ext4 will be used by default.
-     everest.io/csi.volume-name-prefix: test    # (Optional) PV name prefix of the automatically created underlying storage
+     everest.io/csi.volume-name-prefix: test    # (Optional) PV name prefix of the automatically created underlying storage
    labels:
      failure-domain.beta.kubernetes.io/region: <your_region>    # Region of the node where the application is to be deployed
      failure-domain.beta.kubernetes.io/zone: <your_zone>    # AZ of the node where the application is to be deployed
@@ -207,7 +207,7 @@ spec:
diff --git a/docs/cce/umn/cce_10_0625.html b/docs/cce/umn/cce_10_0625.html index 1e710f34..ac25e7a1 100644 --- a/docs/cce/umn/cce_10_0625.html +++ b/docs/cce/umn/cce_10_0625.html @@ -125,7 +125,7 @@ kind: PersistentVolume
 metadata:
   annotations:
     pv.kubernetes.io/provisioned-by: everest-csi-provisioner
-    everest.io/reclaim-policy: retain-volume-only    # Used for a subdirectory when the reclaim policy is Delete. This parameter indicates that when a PVC is deleted, the PV will be deleted but the subdirectory associated with the PV will be retained.
+    everest.io/reclaim-policy: retain-volume-only    # Used for a subdirectory when the reclaim policy is Delete. This parameter indicates that when a PVC is deleted, the PV will be deleted but the subdirectory associated with the PV will be retained.
   name: pv-sfsturbo    # PV name
 spec:
   accessModes:
@@ -140,9 +140,9 @@ spec:
     everest.io/share-export-location: <your_location>    # Shared path of the SFS Turbo volume
     storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
-    everest.io/share-export-location: /a    # (Optional) This parameter indicates an automatically created subdirectory, which must be an absolute path.
+    everest.io/share-export-location: /a    # (Optional) This parameter indicates an automatically created subdirectory, which must be an absolute path.
     everest.io/volume-as: absolute-path    # (Optional) An SFS Turbo subdirectory is used.
-  persistentVolumeReclaimPolicy: Retain    # Reclaim policy, which can be set to Delete when subdirectories are automatically created
+  persistentVolumeReclaimPolicy: Retain    # Reclaim policy, which can be set to Delete when subdirectories are automatically created
   storageClassName: csi-sfsturbo    # Storage class name of the SFS Turbo file system
   mountOptions: []    # Mount options
@@ -376,7 +376,7 @@ static
@@ -81,7 +81,7 @@
@@ -321,7 +321,6 @@
diff --git a/docs/cce/umn/cce_10_0672.html b/docs/cce/umn/cce_10_0672.html index ead170e4..a26faf8a 100644 --- a/docs/cce/umn/cce_10_0672.html +++ b/docs/cce/umn/cce_10_0672.html @@ -6,7 +6,7 @@

    Creating a LoadBalancer Service

    1. Log in to the CCE console and click the cluster name to access the cluster console.
    2. In the navigation pane, choose Services & Ingresses. In the upper right corner, click Create Service.
    3. Configure parameters.

      • Service Name: Specify a Service name, which can be the same as the workload name.
      • Service Type: Select LoadBalancer.
      • Namespace: namespace that the workload belongs to.
      • Service Affinity: For details, see externalTrafficPolicy (Service Affinity).
        • Cluster level: The IP addresses and access ports of all nodes in a cluster can access the workload associated with the Service. Service access will cause performance loss due to route redirection, and the source IP address of the client cannot be obtained.
        • Node level: Only the IP address and access port of the node where the workload is located can access the workload associated with the Service. Service access will not cause performance loss due to route redirection, and the source IP address of the client can be obtained.
        -
      • Selector: Add a label and click Confirm. The Service will use this label to select pods. You can also click Reference Workload Label to use the label of an existing workload. In the dialog box that is displayed, select a workload and click OK.
      • IPv6: This function is disabled by default. After this function is enabled, the cluster IP address of the Service changes to an IPv6 address. This parameter is available only in clusters of v1.15 or later with IPv6 enabled (set during cluster creation).
      • Load Balancer: Select a load balancer type and creation mode.

A load balancer can be dedicated or shared. A dedicated load balancer supports Network (TCP/UDP), Application (HTTP/HTTPS), or Network (TCP/UDP) & Application (HTTP/HTTPS).

        +
      • Selector: Add a label and click Confirm. The Service will use this label to select pods. You can also click Reference Workload Label to use the label of an existing workload. In the dialog box that is displayed, select a workload and click OK.
      • IPv6: This function is disabled by default. After this function is enabled, the cluster IP address of the Service changes to an IPv6 address. This parameter is available only in clusters of v1.15 or later with IPv6 enabled (set during cluster creation).
      • Load Balancer: Select a load balancer type and creation mode.

A load balancer can be dedicated or shared. A dedicated load balancer supports Network (TCP/UDP), Application (HTTP/HTTPS), or Network (TCP/UDP) & Application (HTTP/HTTPS).

        You can select Use existing or Auto create to obtain a load balancer. For details about the configuration of different creation modes, see Table 1.
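The same Service can also be described declaratively. A minimal sketch of a LoadBalancer Service that reuses an existing load balancer follows; the annotation key kubernetes.io/elb.id is an assumption based on the CCE LoadBalancer Service annotations, so verify it against the kubectl-based instructions for your cluster version and replace the placeholders with your own values.

    apiVersion: v1
    kind: Service
    metadata:
      name: nginx
      namespace: default
      annotations:
        kubernetes.io/elb.id: <your_elb_id>    # Assumption: ID of the existing load balancer to use
    spec:
      selector:
        app: nginx                             # Label used to select the backend pods
      externalTrafficPolicy: Cluster           # Cluster level; set to Local for node-level service affinity
      ports:
        - name: cce-service-0
          port: 80                             # Port exposed on the load balancer
          targetPort: 80                       # Container port
          protocol: TCP
      type: LoadBalancer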
Table 1 EVS disk performance specifications

Parameter | Extreme SSD | General Purpose SSD | Ultra-high I/O | High I/O | Common I/O
Max. capacity (GiB) | System disk: 1,024; Data disk: 32,768 (the same limits apply to all five disk types)
Max. IOPS | 128,000 | 20,000 | 50,000 | 5000 | 2200
Max. throughput (MiB/s) | 1000 | 250 | 350 | 150 | 50
Burst IOPS limit | 64,000 | 8000 | 16,000 | 5000 | 2200
Disk IOPS | Min. (128,000, 1800 + 50 x Capacity) | Min. (20,000, 1800 + 12 x Capacity) | Min. (50,000, 1800 + 50 x Capacity) | Min. (5000, 1800 + 8 x Capacity) | Min. (2200, 500 + 2 x Capacity)
Disk throughput (MiB/s) | Min. (1000, 120 + 0.5 x Capacity) | Min. (250, 100 + 0.5 x Capacity) | Min. (350, 120 + 0.5 x Capacity) | Min. (150, 100 + 0.15 x Capacity) | 50
Single-queue access latency (ms) | Sub-millisecond | 1 | 1 | 1–3 | 5–10
API name | ESSD | GPSSD | SSD | SAS | SATA

    Yes

    EVS disk type. All letters are in uppercase.
    • SATA: common I/O
    • SAS: high I/O
    • SSD: ultra-high I/O
    +
    EVS disk type. All letters are in uppercase.
    • SATA: common I/O
    • SAS: high I/O
    • SSD: ultra-high I/O
    • GPSSD: general-purpose SSD
    • ESSD: extreme SSD

    Yes

    EVS disk type. All letters are in uppercase.
    • SATA: common I/O
    • SAS: high I/O
    • SSD: ultra-high I/O
    +
    EVS disk type. All letters are in uppercase.
    • SATA: common I/O
    • SAS: high I/O
    • SSD: ultra-high I/O
    • GPSSD: general-purpose SSD
    • ESSD: extreme SSD

    Yes

    EVS disk type. All letters are in uppercase.
    • SATA: common I/O
    • SAS: high I/O
    • SSD: ultra-high I/O
    +
    EVS disk type. All letters are in uppercase.
    • SATA: common I/O
    • SAS: high I/O
    • SSD: ultra-high I/O
    • GPSSD: general-purpose SSD
    • ESSD: extreme SSD
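For reference, a minimal sketch of how the disk type is typically specified when requesting an EVS volume; the annotation key everest.io/disk-volume-type and the csi-disk storage class are assumptions based on the Everest conventions used elsewhere in this guide.

    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: pvc-evs
      annotations:
        everest.io/disk-volume-type: GPSSD    # Assumption: general-purpose SSD; other values include SSD, SAS, SATA, and ESSD
    spec:
      accessModes:
        - ReadWriteOnce
      resources:
        requests:
          storage: 10Gi
      storageClassName: csi-disk              # Assumption: storage class used for EVS volumes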

    Create a PV on the CCE console.

    1. Choose Storage in the navigation pane. In the right pane, click the PVs tab. Click Create PersistentVolume in the upper right corner. In the dialog box displayed, configure parameters.
      • Volume Type: Select SFS Turbo.
      • SFS Turbo: Click Select SFS Turbo. On the page displayed, select the SFS Turbo file system that meets your requirements and click OK.
      • Subdirectory: Determine whether to use subdirectories to create PVs. Enter the absolute path of a subdirectory, for example, /a/b. Ensure that the subdirectory is available.
      • PV Name: Enter the PV name, which must be unique in a cluster.
      • Access Mode: SFS volumes support only ReadWriteMany, indicating that a storage volume can be mounted to multiple nodes in read/write mode. For details, see Volume Access Modes.
      • Reclaim Policy: Only Retain is supported if you do not use subdirectories to create PVs. For details, see PV Reclaim Policy. If you choose to use a subdirectory to create a PV, the value of this parameter can be Delete.
      • Subdirectory Reclaim Policy: Determine whether to retain subdirectories when a PVC is deleted. This parameter must be used with PV Reclaim Policy and can be configured when PV Reclaim Policy is set to Delete.

        Retain: If a PVC is deleted, the PV will be deleted, but the subdirectories associated with the PV will be retained.

        +
    1. Choose Storage in the navigation pane. In the right pane, click the PVs tab. Click Create PersistentVolume in the upper right corner. In the dialog box displayed, configure parameters.
      • Volume Type: Select SFS Turbo.
      • SFS Turbo: Click Select SFS Turbo. On the page displayed, select the SFS Turbo file system that meets your requirements and click OK.
      • Subdirectory: Determine whether to use subdirectories to create PVs. Enter the absolute path of a subdirectory, for example, /a/b. Ensure that the subdirectory is available.
      • PV Name: Enter the PV name, which must be unique in a cluster.
      • Access Mode: SFS volumes support only ReadWriteMany, indicating that a storage volume can be mounted to multiple nodes in read/write mode. For details, see Volume Access Modes.
      • Reclaim Policy: Only Retain is supported if you do not use subdirectories to create PVs. For details, see PV Reclaim Policy. If you choose to use a subdirectory to create a PV, the value of this parameter can be Delete.
      • Subdirectory Reclaim Policy: Determine whether to retain subdirectories when a PVC is deleted. This parameter must be used with PV Reclaim Policy and can be configured when PV Reclaim Policy is set to Delete.

        Retain: If a PVC is deleted, the PV will be deleted, but the subdirectories associated with the PV will be retained.

        Delete: After a PVC is deleted, the PV and its associated subdirectories will also be deleted.

      • Mount Options: Enter the mounting parameter key-value pairs. For details, see Configuring SFS Turbo Mount Options.
    2. Click Create.
    diff --git a/docs/cce/umn/cce_10_0630.html b/docs/cce/umn/cce_10_0630.html index 9226dd51..ab6edfc6 100644 --- a/docs/cce/umn/cce_10_0630.html +++ b/docs/cce/umn/cce_10_0630.html @@ -119,7 +119,7 @@ metadata: csi.storage.k8s.io/node-publish-secret-name: <your_secret_name> # Custom secret name csi.storage.k8s.io/node-publish-secret-namespace: <your_namespace> # Namespace of the custom secret - everest.io/csi.volume-name-prefix: test # (Optional) PV name prefix of the automatically created underlying storage + everest.io/csi.volume-name-prefix: test # (Optional) PV name prefix of the automatically created underlying storage spec: accessModes: - ReadWriteMany # The value must be ReadWriteMany for OBS. diff --git a/docs/cce/umn/cce_10_0634.html b/docs/cce/umn/cce_10_0634.html index 49cc7112..16148997 100644 --- a/docs/cce/umn/cce_10_0634.html +++ b/docs/cce/umn/cce_10_0634.html @@ -108,7 +108,7 @@ metadata: name: pvc-local namespace: default annotations: - everest.io/csi.volume-name-prefix: test # (Optional) PV name prefix of the automatically created underlying storage + everest.io/csi.volume-name-prefix: test # (Optional) PV name prefix of the automatically created underlying storage spec: accessModes: - ReadWriteOnce # The value must be ReadWriteOnce for local PVs. diff --git a/docs/cce/umn/cce_10_0635.html b/docs/cce/umn/cce_10_0635.html index 6cedb86c..9be87750 100644 --- a/docs/cce/umn/cce_10_0635.html +++ b/docs/cce/umn/cce_10_0635.html @@ -122,7 +122,7 @@ spec: name: pvc-local namespace: default annotations: - everest.io/csi.volume-name-prefix: test # (Optional) PV name prefix of the automatically created underlying storage + everest.io/csi.volume-name-prefix: test # (Optional) PV name prefix of the automatically created underlying storage spec: accessModes: - ReadWriteOnce # The value must be ReadWriteOnce for local PVs. diff --git a/docs/cce/umn/cce_10_0649.html b/docs/cce/umn/cce_10_0649.html index 7b8d9447..58a872f1 100644 --- a/docs/cce/umn/cce_10_0649.html +++ b/docs/cce/umn/cce_10_0649.html @@ -9,7 +9,7 @@
  • Node pool sorting by priority

    Node pools are assigned priorities and sorted accordingly. The node pool with the highest priority is preferentially selected.

  • Flavor selection by priority

    When multiple node pools have the same highest priority, the flavor with the highest priority is selected according to the following rules:

    • The flavor with the highest priority in each node pool is selected.
    • If multiple flavors have the same priority, choose the one that requires the least volume of resources to meet the pod scheduling requirements.
    • If multiple flavors require the minimum volume of resources, choose one based on a balanced distribution among AZs.
    -
• Troubleshooting when resources are insufficient or creation fails

    If the preferred flavor is unavailable due to insufficient resources or quota in the AZ, CCE will try to use the next priority flavor in the node pool, and the original instance will enter a 5-minute cooldown period.

    +
• Troubleshooting when resources are insufficient or creation fails

    If the preferred flavor is unavailable due to insufficient quota in the AZ, CCE will try to use the next priority flavor in the node pool, and the original instance will enter a 5-minute cooldown period.

    If none of the flavors in a node pool can be used to create instances, CCE will try to use the next priority node pool to create instances.

  • diff --git a/docs/cce/umn/cce_10_0651.html b/docs/cce/umn/cce_10_0651.html index 5fc021e8..004b651b 100644 --- a/docs/cce/umn/cce_10_0651.html +++ b/docs/cce/umn/cce_10_0651.html @@ -220,7 +220,7 @@ spec:

    Whether to allocate an EIP with a pod and bind the EIP to the pod

    false or true

    +

    false or true

    yangtse.io/eip-network-type

    diff --git a/docs/cce/umn/cce_10_0653.html b/docs/cce/umn/cce_10_0653.html index 207d9d44..8f1b3011 100644 --- a/docs/cce/umn/cce_10_0653.html +++ b/docs/cce/umn/cce_10_0653.html @@ -80,12 +80,12 @@

    Expand the area and configure the following parameters:

    • Data Disk Space Allocation: allocates space for container engines, images, and ephemeral storage for them to run properly. For details about how to allocate data disk space, see Data Disk Space Allocation.
      NOTE:

      After the data disk space allocation configuration is modified, the modification takes effect only for new nodes. The configuration cannot take effect for the existing nodes even if they are reset.

      -
    • Enabled: Data disk encryption safeguards your data. Snapshots generated from encrypted disks and disks created using these snapshots automatically inherit the encryption setting. BMS nodes do not support data disk encryption that is available only in certain regions. For details, see the console.
      • Not encrypted is selected by default.
      • After setting Data Disk Encryption to Enabled, choose an existing key. If no key is available, click View Key List and create a key. After the key is created, click the refresh icon next to the key text box.
      +
    • Enabled: Data disk encryption safeguards your data. Snapshots generated from encrypted disks and disks created using these snapshots automatically inherit the encryption setting.
      • Not encrypted is selected by default.
      • After setting Data Disk Encryption to Enabled, choose an existing key. If no key is available, click View Key List and create a key. After the key is created, click the refresh icon next to the key text box.
      NOTE:

      After the Data Disk Encryption is modified, the modification takes effect only on newly added nodes. The configuration cannot be synchronized to existing nodes even if they are reset.

    Adding data disks

    -

    A maximum of 16 data disks can be attached to an ECS and 10 to a BMS. By default, a raw disk is created without any processing. You can also click Expand and select any of the following options:

    +

    A maximum of 16 data disks can be attached to an ECS. By default, a raw disk is created without any processing. You can also click Expand and select any of the following options:

    • Default: By default, a raw disk is created without any processing.
    • Mount Disk: The data disk is attached to a specified directory.
    • Use as PV: applicable when there is a high performance requirement on PVs. The node.kubernetes.io/local-storage-persistent label is added to the node with PV configured. The value is linear or striped.
    • Use as ephemeral volume: applicable when there is a high performance requirement on emptyDir.
    NOTE:
    • Local PVs are supported only when the cluster version is v1.21.2-r0 or later and the Everest add-on version is 2.1.23 or later. Version 2.1.23 or later is recommended.
    • Local EVs are supported only when the cluster version is v1.21.2-r0 or later and the Everest add-on version is 1.2.29 or later.
    diff --git a/docs/cce/umn/cce_10_0658.html b/docs/cce/umn/cce_10_0658.html index 101540d5..418bbe95 100644 --- a/docs/cce/umn/cce_10_0658.html +++ b/docs/cce/umn/cce_10_0658.html @@ -4,8 +4,8 @@

You can specify a node specification (flavor) in a node pool for scaling.

    The default node pool does not support scaling. Use Creating a Node to add a node.

    -
    1. Log in to the CCE console.
    2. Click the cluster name to access the cluster console. Choose Nodes in the navigation pane. In the right pane, click the Node Pools tab.
    3. Choose Scaling next to the target node pool.
    4. In the displayed window, configure scaling parameters.

      • Number of Scaling Targets: The number of target nodes cannot exceed the management scale of the current cluster.
      • Node Configuration: Use the selected flavor to add nodes. If the flavor resources are insufficient, the scale-out will fail.
        • If there are fewer nodes running in the node pool than the desired number of nodes, some nodes will be added. If there are more nodes than the desired number of nodes, some nodes will be deleted.
        • During scale-in, if there are not enough nodes of the specified flavor to be deleted, nodes of other flavors will be removed.
        -
        +
        1. Log in to the CCE console.
        2. Click the cluster name to access the cluster console. Choose Nodes in the navigation pane. In the right pane, click the Node Pools tab.
        3. Choose Scaling next to the target node pool.
        4. In the displayed Node Pool Scaling window, configure scaling parameters.

          • Add or reduce nodes for scaling.
          • Use the selected flavor to increase or decrease the number of nodes.
          • Configure the number of nodes to be added or deleted.
            • When scaling out a node pool, make sure that the total number of nodes, both existing and new, does not exceed the management scale of the current cluster.
            • When scaling in a node pool, make sure that the number of nodes to be removed does not exceed the number of nodes currently in the pool.

              Scaling in can result in the unavailability of resources associated with a node, such as local storage and workloads that were scheduled to that node. Exercise caution when performing this operation to avoid impact on running services.

              +

        5. Click OK.
    diff --git a/docs/cce/umn/cce_10_0659.html b/docs/cce/umn/cce_10_0659.html index 7fc1b10e..7dce5688 100644 --- a/docs/cce/umn/cce_10_0659.html +++ b/docs/cce/umn/cce_10_0659.html @@ -69,7 +69,7 @@

    Typical scenario: Disk I/O suspension causes process suspension.

    Warning event

    -

    Listening object: /dev/kmsg

    +

    Listening object: /dev/kmsg

    Matching rule: "task \\S+:\\w+ blocked for more than \\w+ seconds\\."

    Warning event

    -

    Listening object: /dev/kmsg

    +

    Listening object: /dev/kmsg

    Matching rule: Remounting filesystem read-only

    Default threshold: 10 abnormal processes detected for three consecutive times

    Source:

    • /proc/{PID}/stat
• Alternatively, you can run the ps aux command.
    -

    Exceptional scenario: The ProcessD check item ignores the resident D processes (heartbeat and update) on which the SDI driver on the BMS node depends.

    @@ -560,7 +560,7 @@ spec: diff --git a/docs/cce/umn/cce_10_0692.html b/docs/cce/umn/cce_10_0692.html new file mode 100644 index 00000000..18abbbb3 --- /dev/null +++ b/docs/cce/umn/cce_10_0692.html @@ -0,0 +1,29 @@ + + + +

    Nginx Ingresses

    + +

    +
    + + + diff --git a/docs/cce/umn/cce_10_0693.html b/docs/cce/umn/cce_10_0693.html new file mode 100644 index 00000000..a740bcf7 --- /dev/null +++ b/docs/cce/umn/cce_10_0693.html @@ -0,0 +1,92 @@ + + +

    Configuring an HTTPS Certificate for an Nginx Ingress

    +

    HTTPS certificates can be configured for ingresses to provide security services.

    +
    1. Use kubectl to access the cluster. For details, see Connecting to a Cluster Using kubectl.
    2. Ingress supports two TLS secret types: kubernetes.io/tls and IngressTLS. IngressTLS is used as an example. For details, see Creating a Secret. For details about examples of the kubernetes.io/tls secret and its description, see TLS secrets.

      Create a YAML file named ingress-test-secret.yaml. The file name can be customized.

      +

      vi ingress-test-secret.yaml

      +
      The YAML file is configured as follows:
      apiVersion: v1
      +data:
      +  tls.crt: LS0******tLS0tCg==
      +  tls.key: LS0tL******0tLS0K
      +kind: Secret
      +metadata:
      +  annotations:
      +    description: test for ingressTLS secrets
      +  name: ingress-test-secret
      +  namespace: default
      +type: IngressTLS
      +
      +

      In the preceding information, tls.crt and tls.key are only examples. Replace them with the actual files. The values of tls.crt and tls.key are Base64-encoded.

      +
      +
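For example, you can generate the Base64-encoded values from your certificate and private key files with standard tools (a sketch, assuming a Linux host with GNU coreutils; server.crt and server.key are placeholder file names):

    base64 -w0 server.crt    # Output becomes the value of tls.crt
    base64 -w0 server.key    # Output becomes the value of tls.key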

    3. Create a secret.

      kubectl create -f ingress-test-secret.yaml

      +

      If information similar to the following is displayed, the secret has been created:

      +
      secret/ingress-test-secret created
      +

      View the created secret.

      +

      kubectl get secrets

      +

      If information similar to the following is displayed, the secret has been created:

      +
      NAME                         TYPE                                  DATA      AGE
      +ingress-test-secret          IngressTLS                            2         13s
      +

    4. Create a YAML file named ingress-test.yaml. The file name can be customized.

      vi ingress-test.yaml

      +
      For clusters of v1.23 or later:
      apiVersion: networking.k8s.io/v1
      +kind: Ingress 
      +metadata: 
      +  name: ingress-test
      +  namespace: default
      +spec:
      +  tls: 
      +  - hosts: 
      +    - foo.bar.com
      +    secretName: ingress-test-secret  # Replace it with your TLS key certificate.
      +  rules:
      +    - host: foo.bar.com
      +      http:
      +        paths:
      +          - path: /
      +            backend:
      +              service:
      +                name: <your_service_name>  # Replace it with the name of your target Service.
      +                port:
      +                  number: <your_service_port>  # Replace it with the port number of your target Service.
      +            property:
      +              ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
      +            pathType: ImplementationSpecific
      +  ingressClassName: nginx
      +
      +
      For clusters of v1.21 or earlier:
      apiVersion: networking.k8s.io/v1beta1
      +kind: Ingress 
      +metadata: 
      +  name: ingress-test
      +  annotations: 
      +    kubernetes.io/ingress.class: nginx
      +spec:
      +  tls: 
      +  - hosts: 
      +    - foo.bar.com
      +    secretName: ingress-test-secret   # Replace it with your TLS key certificate.
      +  rules: 
      +  - host: foo.bar.com
      +    http: 
      +      paths: 
      +      - path: '/'
      +        backend: 
      +          serviceName: <your_service_name>  # Replace it with the name of your target Service.
      +          servicePort: <your_service_port>  # Replace it with the port number of your target Service.
      +  ingressClassName: nginx
      +
      +

    5. Create an ingress.

      kubectl create -f ingress-test.yaml

      +

      If information similar to the following is displayed, the ingress has been created.

      +
      ingress/ingress-test created
      +

      View the created ingress.

      +

      kubectl get ingress

      +

      If information similar to the following is displayed, the ingress has been created and the workload is accessible.

      +
      NAME             HOSTS     ADDRESS          PORTS   AGE
      +ingress-test     *         121.**.**.**     80      10s
      +

    6. Enter https://121.**.**.**:443 in the address box of the browser to access the workload (for example, Nginx workload).

      121.**.**.** indicates the IP address of the unified load balancer.

      +

    +
    +
    + +
    + diff --git a/docs/cce/umn/cce_10_0695.html b/docs/cce/umn/cce_10_0695.html index a2774334..bfd297f6 100644 --- a/docs/cce/umn/cce_10_0695.html +++ b/docs/cce/umn/cce_10_0695.html @@ -28,7 +28,7 @@ - diff --git a/docs/cce/umn/cce_10_0697.html b/docs/cce/umn/cce_10_0697.html new file mode 100644 index 00000000..c0c5a2cb --- /dev/null +++ b/docs/cce/umn/cce_10_0697.html @@ -0,0 +1,58 @@ + + +

    Configuring HTTPS Backend Services for an Nginx Ingress

    +

    Ingress can function as a proxy for backend services using different protocols. By default, the backend proxy channel of an ingress is an HTTP channel. To create an HTTPS channel, add the following configuration to the annotations field:

    +
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
    +

    An ingress configuration example is as follows:

    +
    For clusters of v1.23 or later:
    apiVersion: networking.k8s.io/v1
    +kind: Ingress 
    +metadata: 
    +  name: ingress-test
    +  namespace: default
    +  annotations:
    +    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
    +spec:
    +  tls: 
    +    - secretName: ingress-test-secret  # Replace it with your TLS key certificate.
    +  rules:
    +    - host: ''
    +      http:
    +        paths:
    +          - path: '/'
    +            backend:
    +              service:
    +                name: <your_service_name>  # Replace it with the name of your target Service.
    +                port:
    +                  number: <your_service_port>  # Replace it with the port number of your target Service.
    +            property:
    +              ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
    +            pathType: ImplementationSpecific
    +  ingressClassName: nginx
    +
    +
    For clusters of v1.21 or earlier:
    apiVersion: networking.k8s.io/v1beta1
    +kind: Ingress
    +metadata:
    +  name: ingress-test
    +  namespace: default
    +  annotations:
    +    kubernetes.io/ingress.class: nginx
    +    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
    +spec:
    +  tls: 
    +    - secretName: ingress-test-secret  # Replace it with your TLS key certificate.
    +  rules:
    +    - host: ''
    +      http:
    +        paths:
    +          - path: '/'
    +            backend:
    +              serviceName: <your_service_name>  # Replace it with the name of your target Service.
    +              servicePort: <your_service_port>  # Replace it with the port number of your target Service.
    +
    +
    +
    + +
    + diff --git a/docs/cce/umn/cce_10_0698.html b/docs/cce/umn/cce_10_0698.html new file mode 100644 index 00000000..0547f937 --- /dev/null +++ b/docs/cce/umn/cce_10_0698.html @@ -0,0 +1,61 @@ + + +

    Configuring Consistent Hashing for Load Balancing of an Nginx Ingress

    +

Native Nginx supports multiple load balancing rules, including weighted round robin and IP hash. Building on these native capabilities, Nginx Ingress also supports consistent hashing for load balancing.

    +

By default, the IP hash method supported by Nginx uses a linear hash space, and the backend server is selected based on the hash value of the client IP address. However, when a node is added or deleted, all IP addresses must be rehashed and rerouted, so a large number of sessions are lost or caches become invalid. Consistent hashing was introduced to Nginx Ingress to solve this problem.

    +

Consistent hashing is a special hash algorithm that constructs a ring hash space instead of the common linear hash space. When a node is added or deleted, only the affected routes are migrated clockwise to the next node, and the other routes remain unchanged. This keeps rerouting to a minimum and resolves the load balancing issues caused by dynamically adding and deleting nodes.

    +

If a consistent hashing rule is configured, a newly added server takes over part of the load from all other servers. Similarly, when a server is removed, its load is shared among all remaining servers. This balances the load across the nodes in the cluster and prevents the avalanche effect caused by the failure of a single node.

    +

    Configuring a Consistent Hashing Rule

    Nginx Ingress can use the nginx.ingress.kubernetes.io/upstream-hash-by annotation to configure consistent hashing rules. The following is an example:

    +
    For clusters of v1.23 or later:
    apiVersion: networking.k8s.io/v1
    +kind: Ingress 
    +metadata: 
    +  name: ingress-test
    +  namespace: default
    +  annotations:
    +    nginx.ingress.kubernetes.io/upstream-hash-by: "$request_uri"  # Perform hashing based on the request URI.
    +spec:
    +  rules:
    +    - host: ''
    +      http:
    +        paths:
    +          - path: '/'
    +            backend:
    +              service:
    +                name: <your_service_name>  # Replace it with the name of your target Service.
    +                port:
    +                  number: <your_service_port>  # Replace it with the port number of your target Service.
    +            property:
    +              ingress.beta.kubernetes.io/url-match-mode: STARTS_WITH
    +            pathType: ImplementationSpecific
    +  ingressClassName: nginx
    +
    +
    For clusters of v1.21 or earlier:
    apiVersion: networking.k8s.io/v1beta1
    +kind: Ingress
    +metadata:
    +  name: ingress-test
    +  namespace: default
    +  annotations:
    +    kubernetes.io/ingress.class: nginx
    +    nginx.ingress.kubernetes.io/upstream-hash-by: "$request_uri"  # Perform hashing based on the request URI.
    +spec:
    +  rules:
    +    - host: ''
    +      http:
    +        paths:
    +          - path: '/'
    +            backend:
    +              serviceName: <your_service_name>  # Replace it with the name of your target Service.
    +              servicePort: <your_service_port>  # Replace it with the port number of your target Service.
    +
    +
    The value of nginx.ingress.kubernetes.io/upstream-hash-by can be an nginx variable, a text value, or any combination:
    • nginx.ingress.kubernetes.io/upstream-hash-by: "$request_uri" indicates that requests are hashed based on the request URI.
    • nginx.ingress.kubernetes.io/upstream-hash-by: "$request_uri$host" indicates that requests are hashed based on the request URI and domain name.
    • nginx.ingress.kubernetes.io/upstream-hash-by: "${request_uri}-text-value" indicates that requests are hashed based on the request URI and text value.
    +
    +
    + +
    +
    + +
    + diff --git a/docs/cce/umn/cce_10_0699.html b/docs/cce/umn/cce_10_0699.html new file mode 100644 index 00000000..307b0e33 --- /dev/null +++ b/docs/cce/umn/cce_10_0699.html @@ -0,0 +1,352 @@ + + +

    Configuring Nginx Ingresses Using Annotations

    +

The nginx-ingress add-on in CCE uses the community chart and image. If the default add-on parameters do not meet your requirements, you can add annotations to define what you need, such as the default backend, timeout interval, and maximum size of a request body.

    +

    This section describes common annotations used for creating an ingress of the Nginx type.

    +
• The value of an annotation can only be a string. Values of other types (such as Boolean or numeric values) must be enclosed in quotation marks (""), for example, "true", "false", and "100" (see the example below).
    • Nginx Ingress supports native annotations of the community. For details, see Annotations.
    +
    + +
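For example, Boolean and numeric values are passed as quoted strings (a minimal sketch; the community annotations shown here are used only to illustrate the quoting rule):

    metadata:
      annotations:
        nginx.ingress.kubernetes.io/ssl-redirect: "false"           # Boolean value, quoted
        nginx.ingress.kubernetes.io/proxy-connect-timeout: "120"    # Numeric value, quoted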

    Ingress Type

    +
    Table 1 Load balancer configurations

    How to Create

    Specifies the load balancing algorithm of the backend server group. The default value is ROUND_ROBIN.

    Options:

    -
    • ROUND_ROBIN: weighted round robin algorithm
    • LEAST_CONNECTIONS: weighted least connections algorithm
    • SOURCE_IP: source IP hash algorithm
    +
    • ROUND_ROBIN: weighted round robin algorithm
    • LEAST_CONNECTIONS: weighted least connections algorithm
    • SOURCE_IP: source IP hash algorithm
    NOTE:

    If this parameter is set to SOURCE_IP, the weight setting (weight field) of backend servers bound to the backend server group is invalid, and sticky session cannot be enabled.

    String

    • cce: A proprietary LoadBalancer ingress is used.
    +
    • cce: A proprietary LoadBalancer ingress is used.
    • nginx: Nginx Ingress is used.

    This parameter is mandatory when an ingress is created by calling the API.

    For clusters of v1.23 or later, use the parameter ingressClassName. For details, see Using kubectl to Create a LoadBalancer Ingress.

    + + + + + + + + + + + +
    Table 1 Ingress type annotations

    Parameter

    +

    Type

    +

    Description

    +

    Supported Cluster Version

    +

    kubernetes.io/ingress.class

    +

    String

    +
    • nginx: Nginx Ingress is used.
    • cce: A proprietary LoadBalancer ingress is used.
    +

    This parameter is mandatory when an ingress is created by calling the API.

    +

    For clusters of v1.23 or later, use the parameter ingressClassName. For details, see Using kubectl to Create an Nginx Ingress.

    +

    Only clusters of v1.21 or earlier

    +
    +
    +

    For details about how to use the preceding annotations, see Using kubectl to Create an Nginx Ingress.

    + +

    Interconnecting with HTTPS Backend Services

    +
    + + + + + + + + + +
    Table 2 Annotations for interconnecting with HTTPS backend services

    Parameter

    +

    Type

    +

    Description

    +

    nginx.ingress.kubernetes.io/backend-protocol

    +

    String

    +

    If this parameter is set to HTTPS, HTTPS is used to forward requests to the backend service container.

    +
    +
    +

    For details, see Configuring HTTPS Backend Services for an Nginx Ingress.

    +
    +

    Creating a Consistent Hashing Rule for Load Balancing

    +
    + + + + + + + + + +
    Table 3 Annotation of consistent hashing for load balancing

    Parameter

    +

    Type

    +

    Description

    +

    nginx.ingress.kubernetes.io/upstream-hash-by

    +

    String

    +
    Enable consistent hashing for load balancing for backend servers. The parameter value can be an Nginx parameter, a text value, or any combination. For example:
    • nginx.ingress.kubernetes.io/upstream-hash-by: "$request_uri" indicates that requests are hashed based on the request URI.
    • nginx.ingress.kubernetes.io/upstream-hash-by: "$request_uri$host" indicates that requests are hashed based on the request URI and domain name.
    • nginx.ingress.kubernetes.io/upstream-hash-by: "${request_uri}-text-value" indicates that requests are hashed based on the request URI and text value.
    +
    +
    +
    +

    For details, see Configuring Consistent Hashing for Load Balancing of an Nginx Ingress.

    +
    +

    Customized Timeout Interval

    +
    + + + + + + + + + +
    Table 4 Customized timeout interval annotations

    Parameter

    +

    Type

    +

    Description

    +

    nginx.ingress.kubernetes.io/proxy-connect-timeout

    +

    String

    +

Customized connection timeout interval. You do not need to specify a unit when setting the timeout interval; the default unit is seconds.

    +

    Example:

    +
    nginx.ingress.kubernetes.io/proxy-connect-timeout: '120'
    +
    +
    +
    +

    Customizing a Body Size

    +
    + + + + + + + + + +
    Table 5 Annotations of customizing a body size

    Parameter

    +

    Type

    +

    Description

    +

    nginx.ingress.kubernetes.io/proxy-body-size

    +

    String

    +

    When the body size in a request exceeds the upper limit, error 413 will be returned to the client. You can use this parameter to adjust the upper limit of the body size. The basic unit of the parameter value is byte. You can use units such as KB, MB, and GB. The unit conversion is as follows:

    +

1 KB = 1024 bytes, 1 MB = 1024 KB, 1 GB = 1024 MB

    +

    Example:

    +
    nginx.ingress.kubernetes.io/proxy-body-size: 8m
    +
    +
    +
    +
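Putting the two preceding annotations together, a minimal sketch of an ingress that customizes both the connection timeout and the request body size might look as follows (the Service name and port are placeholders):

    apiVersion: networking.k8s.io/v1
    kind: Ingress
    metadata:
      name: ingress-test
      namespace: default
      annotations:
        nginx.ingress.kubernetes.io/proxy-connect-timeout: '120'   # Connection timeout in seconds
        nginx.ingress.kubernetes.io/proxy-body-size: 8m            # Maximum request body size
    spec:
      rules:
        - host: ''
          http:
            paths:
              - path: /
                backend:
                  service:
                    name: <your_service_name>   # Replace it with the name of your target Service.
                    port:
                      number: <your_service_port>  # Replace it with the port number of your target Service.
                pathType: ImplementationSpecific
      ingressClassName: nginx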

    Two-Way HTTPS Authentication

    Nginx Ingress supports two-way HTTPS authentication between the server and client to ensure secure connections.

    +
    1. Use kubectl to access the cluster. For details, see Connecting to a Cluster Using kubectl.
    2. Run the following command to create a self-signed CA certificate:

openssl req -x509 -sha256 -newkey rsa:4096 -keyout ca.key -out ca.crt -days 365 -nodes -subj '/CN=Ingress Cert Authority'
      +

      Expected output:

      +
      Generating a RSA private key
      +.............++++
      +................................................++++
      +writing new private key to 'ca.key'
      +-----
      +

    3. Create a server certificate.

      1. Run the following command to create a request file for generating a server certificate:
        openssl req -new -newkey rsa:4096 -keyout server.key -out server.csr -nodes -subj '/CN=foo.bar.com'
        +

        Expected output:

        +
        Generating a RSA private key
        +.....................................................++++
        +..........++++
        +writing new private key to 'server.key'
        +-----
        +
      2. Run the following command to issue the server request file using the root certificate to generate the server certificate:
        openssl x509 -req -sha256 -days 365 -in server.csr -CA ca.crt -CAkey ca.key -set_serial 01 -out server.crt
        +

        Expected output:

        +
        Signature ok
        +subject=CN = foo.bar.com
        +Getting CA Private Key
        +
      +

    4. Create a client certificate.

      1. Run the following command to create a request file for generating a client certificate:
        openssl req -new -newkey rsa:4096 -keyout client.key -out client.csr -nodes -subj '/CN=Ingress'
        +

        Expected output:

        +
        Generating a RSA private key
        +.................................++++
        +................................................++++
        +writing new private key to 'client.key'
        +-----
        +
      2. Run the following command to issue the client request file using the root certificate to generate the client certificate:
        openssl x509 -req -sha256 -days 365 -in client.csr -CA ca.crt -CAkey ca.key -set_serial 02 -out client.crt
        +

        Expected output:

        +
        Signature ok
        +subject=CN = Ingress
        +Getting CA Private Key
        +
      +

    5. Run the ls command to check the created certificates.

      Expected output:

      +
      ca.crt  ca.key  client.crt  client.csr  client.key  server.crt  server.csr  server.key
      +

    6. Run the following command to create a secret of the CA certificate:

      kubectl create secret generic ca-secret --from-file=ca.crt=ca.crt
      +

      Expected output:

      +
      secret/ca-secret created
      +

    7. Run the following command to create a secret of the server certificate:

      kubectl create secret generic tls-secret --from-file=tls.crt=server.crt --from-file=tls.key=server.key
      +

      Expected output:

      +
      secret/tls-secret created
      +

    8. Create a YAML file named ingress-test.yaml. The file name can be customized.

      vi ingress-test.yaml

      +
      • For clusters of v1.23 or later:
        apiVersion: networking.k8s.io/v1
        +kind: Ingress
        +metadata:
        +  annotations:
        +    nginx.ingress.kubernetes.io/auth-tls-verify-client: "on"
        +    nginx.ingress.kubernetes.io/auth-tls-secret: "default/ca-secret"   # Replace it with your CA certificate secret.
        +    nginx.ingress.kubernetes.io/auth-tls-verify-depth: "1"
        +    nginx.ingress.kubernetes.io/auth-tls-pass-certificate-to-upstream: "true"
        +  name: ingress-test
        +  namespace: default
        +spec:
        +  rules:
        +  - host: foo.bar.com
        +    http:
        +      paths:
        +      - backend:
        +          service:
        +            name: nginx-test  # Replace it with the name of your target Service.
        +            port: 
        +              number: 80  # Replace it with the port of your target Service.
        +        path: /
        +        pathType: ImplementationSpecific
        +  tls:
        +  - hosts:
        +    - foo.bar.com
        +    secretName: tls-secret   # Replace it with your TLS certificate secret.
        +  ingressClassName: nginx
        +
      • For clusters of v1.21 or earlier:
        apiVersion: networking.k8s.io/v1beta1
        +kind: Ingress 
        +metadata: 
        +  annotations: 
        +    kubernetes.io/ingress.class: nginx
        +    nginx.ingress.kubernetes.io/auth-tls-verify-client: "on"
        +    nginx.ingress.kubernetes.io/auth-tls-secret: "default/ca-secret"   # Replace it with your CA certificate secret.
        +    nginx.ingress.kubernetes.io/auth-tls-verify-depth: "1"
        +    nginx.ingress.kubernetes.io/auth-tls-pass-certificate-to-upstream: "true"
        +  name: ingress-test
        +  namespace: default
        +spec:
        +  rules: 
        +  - host: foo.bar.com
        +    http: 
        +      paths: 
        +      - path: '/'
        +        backend: 
        +          serviceName: nginx-test  # Replace it with the name of your target Service.
        +          servicePort: 80  # Replace it with the port of your target Service.
        +  tls: 
        +  - hosts: 
        +    - foo.bar.com
        +    secretName: tls-secret   # Replace it with your TLS key certificate.
        +
      +

    9. Run the following command to create an ingress:

      kubectl create -f ingress-test.yaml
      +

      Expected output:

      +
      ingress.networking.k8s.io/ingress-test created
      +

    10. Run the following command to obtain the IP address of the ingress:

      kubectl get ingress
      +

      Expected output:

      +
      NAME         CLASS   HOSTS         ADDRESS      PORTS     AGE
      +nginx-test   nginx   foo.bar.com   10.3.xx.xx   80, 443   27m
      +

11. Run the following command to add the ingress IP address to the hosts file. Replace 10.3.xx.xx with the actual IP address of your ingress:

      echo "10.3.xx.xx  foo.bar.com" | sudo tee -a /etc/hosts
      +

      Expected output:

      +
      10.3.xx.xx  foo.bar.com
      +

    12. Verify the configuration.

      • The client does not send the certificate for access.
        curl --cacert ./ca.crt  https://foo.bar.com
        +

        Expected output:

        +
        <html>
        +<head><title>400 No required SSL certificate was sent</title></head>
        +<body>
        +<center><h1>400 Bad Request</h1></center>
        +<center>No required SSL certificate was sent</center>
        +<hr><center>nginx</center>
        +</body>
        +</html>
        +
      • The client sends the certificate for access.
        curl --cacert ./ca.crt --cert ./client.crt --key ./client.key https://foo.bar.com
        +

        Expected output:

        +
        <!DOCTYPE html>
        +<html>
        +<head>
        +<title>Welcome to nginx!</title>
        +<style>
        +body {
        +    width: 35em;
        +    margin: 0 auto;
        +    font-family: Tahoma, Verdana, Arial, sans-serif;
        +}
        +</style>
        +</head>
        +<body>
        +<h1>Welcome to nginx!</h1>
        +<p>If you see this page, the nginx web server is successfully installed and
        +working. Further configuration is required.</p>
        + 
        +<p>For online documentation and support please refer to
        +<a href="http://nginx.org/">nginx.org</a>.<br/>
        +Commercial support is available at
        +<a href="http://nginx.com/">nginx.com</a>.</p>
        + 
        +<p><em>Thank you for using nginx.</em></p>
        +</body>
        +</html>
        +
      +

    +
    +

    Domain Name Regularization

    Nginx Ingress allows you to configure the nginx.ingress.kubernetes.io/server-alias annotation to configure regular expressions for domain names.

    +
    1. Use kubectl to access the cluster. For details, see Connecting to a Cluster Using kubectl.
    2. Create a YAML file named ingress-test.yaml. The file name can be customized.

      vi ingress-test.yaml
      +

For example, the value ~^www\.\d+\.example\.com$,abc.example.com (a regular expression followed by a plain domain name) indicates that the ingress can be accessed using www.{one or more digits}.example.com and abc.example.com.

      +
      • For clusters of v1.23 or later:
        apiVersion: networking.k8s.io/v1
        +kind: Ingress
        +metadata:
        +  annotations:
        +    nginx.ingress.kubernetes.io/server-alias: '~^www\.\d+\.example\.com$,abc.example.com'
        +  name: ingress-test
        +  namespace: default
        +spec:
        +  rules:
        +  - host: foo.bar.com
        +    http:
        +      paths:
        +      - backend:
        +          service:
        +            name: nginx-93244  # Replace it with the name of your target Service.
        +            port: 
        +              number: 80  # Replace it with the port of your target Service.
        +        path: /
        +        pathType: ImplementationSpecific
        +  ingressClassName: nginx
        +
      • For clusters of v1.21 or earlier:
        apiVersion: networking.k8s.io/v1beta1
        +kind: Ingress 
        +metadata: 
        +  annotations: 
        +    kubernetes.io/ingress.class: nginx
+    nginx.ingress.kubernetes.io/server-alias: '~^www\.\d+\.example\.com$,abc.example.com'
        +  name: ingress-test
        +  namespace: default
        +spec:
        +  rules: 
        +  - host: foo.bar.com
        +    http: 
        +      paths: 
        +      - path: '/'
        +        backend: 
        +          serviceName: nginx-test  # Replace it with the name of your target Service.
        +          servicePort: 80  # Replace it with the port of your target Service.
        +
      +

    3. Run the following command to create an ingress:

      kubectl create -f ingress-test.yaml
      +

      Expected output:

      +
      ingress.networking.k8s.io/ingress-test created
      +

    4. Check the Nginx Ingress Controller configuration.

      1. Run the following command to check the Nginx Ingress Controller pods:
        kubectl get pods -n kube-system | grep nginx-ingress-controller
        +

        Expected output:

        +
        cceaddon-nginx-ingress-controller-68d7bcc67-dxxxx        1/1     Running   0          18h
        +cceaddon-nginx-ingress-controller-68d7bcc67-cxxxx        1/1     Running   0          18h
        +
      2. Run the following command to check the Nginx Ingress Controller configuration:
kubectl exec -n kube-system cceaddon-nginx-ingress-controller-68d7bcc67-dxxxx -- cat /etc/nginx/nginx.conf | grep -C3 "foo.bar.com"
        +

        Expected output:

        +
                 ## start server foo.bar.com
        +         server {
        +                  server_name foo.bar.com abc.example.com ~^www\.\d+\.example\.com$ ;
        +                  
        +                  listen 80  ;
        +                  listen [::]:80  ;
        +--
        +                  }
        +                  
        +         }
        +         ## end server foo.bar.com
        +
      +

    5. Run the following command to obtain the IP address of the ingress:

      kubectl get ingress
      +

      Expected output:

      +
      NAME         CLASS   HOSTS         ADDRESS      PORTS   AGE
      +ingress-test   nginx   foo.bar.com   10.3.xx.xx   80      14m
      +

    6. Use different rules to test service access.

      • Run the following command to access the service through Host: foo.bar.com:
        curl -H "Host: foo.bar.com" 10.3.xx.xx/
        +

        It is expected that the web page can be accessed properly.

        +
      • Run the following command to access the service through Host: www.123.example.com:
        curl -H "Host: www.123.example.com" 10.3.xx.xx/
        +

        It is expected that the web page can be accessed properly.

        +
      • Run the following command to access the service through Host: www.321.example.com:
        curl -H "Host: www.321.example.com" 10.3.xx.xx/
        +

        It is expected that the web page can be accessed properly.

        +
      +

    +
    +

    Documentation

    For details about annotation parameters supported by Nginx Ingress, see Annotations.
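    The following is a hedged sketch of an ingress that combines several commonly used community Nginx Ingress annotations. The ingress name, annotation values, and the Service name nginx-93244 are illustrative only; check the Annotations reference for the full list and exact semantics.

      apiVersion: networking.k8s.io/v1
      kind: Ingress
      metadata:
        name: ingress-annotation-demo        # Example name; replace as needed.
        namespace: default
        annotations:
          nginx.ingress.kubernetes.io/rewrite-target: /       # Rewrite the request path before forwarding.
          nginx.ingress.kubernetes.io/proxy-body-size: 8m     # Maximum allowed request body size.
          nginx.ingress.kubernetes.io/ssl-redirect: "false"   # Do not force HTTP-to-HTTPS redirection.
      spec:
        ingressClassName: nginx
        rules:
        - host: foo.bar.com
          http:
            paths:
            - path: /
              pathType: ImplementationSpecific
              backend:
                service:
                  name: nginx-93244   # Replace it with the name of your target Service.
                  port:
                    number: 80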

    +
    + diff --git a/docs/cce/umn/cce_10_0734.html b/docs/cce/umn/cce_10_0734.html index e318bb5b..009dc5bc 100644 --- a/docs/cce/umn/cce_10_0734.html +++ b/docs/cce/umn/cce_10_0734.html @@ -31,7 +31,7 @@
    -

    Notes and Constraints

    • To access a pod bound with an EIP from the Internet, add security group rules to allow the target request traffic.
    • Only one EIP can be bound to a pod.
    • Configure the EIP-related annotation when creating a pod. After the pod is created, the annotations related to the EIP cannot be modified.
    • Do not perform operations on the EIP associated with a pod through the EIP console or API. Otherwise, the EIP may malfunction. The operations include changing the EIP name, deleting, unbinding, or binding the EIP, and changing the billing mode of the EIP.
    • After an automatically allocated EIP is manually deleted, the network malfunctions. In this case, rebuild the pod.
    +

    Notes and Constraints

    • To access a pod bound with an EIP from the Internet, add security group rules to allow the target request traffic.
    • Only one EIP can be bound to a pod.
    • Configure the EIP-related annotation when creating a pod. After the pod is created, the annotations related to the EIP cannot be modified.
    • Do not perform operations on the EIP associated with a pod through the EIP console or API. Otherwise, the EIP may malfunction. The operations include changing the EIP name, deleting, and unbinding or binding the EIP.
    • After an automatically allocated EIP is manually deleted, the network malfunctions. In this case, rebuild the pod.

    Allocating an EIP with a Pod

    If you set the pod-with-eip annotation to true when creating a pod, an EIP will be automatically allocated and bound to the pod, as shown in the sketch after the following notes.

    • CCE will automatically add cluster ID, namespace, and pod name labels to an EIP that has been allocated automatically.
    • If a pod is created with an automatically allocated EIP, deleting the pod will also delete the EIP.
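    The following is a minimal sketch of such a pod. The full annotation key yangtse.io/pod-with-eip is an assumption made for illustration, consistent with the yangtse.io/eip-network-type annotation listed below; verify the exact keys against the annotation table.

      apiVersion: v1
      kind: Pod
      metadata:
        name: nginx-eip-demo                 # Example name.
        annotations:
          yangtse.io/pod-with-eip: "true"    # Assumed annotation key: allocate an EIP and bind it to the pod.
      spec:
        containers:
        - name: container-0
          image: nginx:alpine                # Example image.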
    @@ -192,7 +192,7 @@ spec:

    Whether to allocate an EIP with a pod and bind the EIP to the pod

    false or true

    +

    false or true

    yangtse.io/eip-network-type

    diff --git a/docs/cce/umn/cce_10_0910.html b/docs/cce/umn/cce_10_0910.html index ae88b8ba..d0b96c90 100644 --- a/docs/cce/umn/cce_10_0910.html +++ b/docs/cce/umn/cce_10_0910.html @@ -10,6 +10,8 @@

    EVS

    +

    +

    +

    csi.storage.k8s.io/csi-driver-name

    Yes

    EVS disk type. All letters are in uppercase.
    • SATA: common I/O
    • SAS: high I/O
    • SSD: ultra-high I/O
    +
    EVS disk type. All letters are in uppercase.
    • SATA: common I/O
    • SAS: high I/O
    • SSD: ultra-high I/O
    • GPSSD: general-purpose SSD
    • ESSD: extreme SSD

    CCE proprietary

    Network

    +

    Network

    +

    CoreDNS

    CCE proprietary

    NGINX Ingress Controller

    +

    nginx-ingress

    +

    Featured open source

    +

    Storage

    CCE Container Storage (Everest)

    diff --git a/docs/cce/umn/cce_bulletin_0068.html b/docs/cce/umn/cce_bulletin_0068.html index 15c7f109..bae547eb 100644 --- a/docs/cce/umn/cce_bulletin_0068.html +++ b/docs/cce/umn/cce_bulletin_0068.html @@ -9,28 +9,28 @@

    New and Enhanced Features

    Features in the alpha stage are disabled by default, features in the beta stage are enabled by default, and features in the GA stage are always enabled and cannot be disabled. The option to turn GA features on or off will be removed in later Kubernetes versions. CCE follows the same policies for new features as the Kubernetes community.

    • The version skew policy is expanded to three versions.

      Starting with control planes 1.28 and worker nodes 1.25, the Kubernetes skew policy expands the supported control plane and worker node skew to three versions. This enables annual minor version upgrades of nodes while staying on supported minor versions. For details, see Version Skew Policy.

    • Retroactive Default StorageClass moves to GA.

      The retroactive default StorageClass assignment graduates to GA. This enhancement brings a significant improvement to how default StorageClasses are assigned to PersistentVolumeClaims (PVCs).

      -

      The PV controller has been modified to automatically assign a default StorageClass to any unbound PVC with storageClassName not configured. Additionally, the PVC admission validation mechanism within the API server has been adjusted to allow changing values from an unset state to an actual StorageClass name. For details, see Retroactive default StorageClass assignment.

      -
    • Native sidecar containers are introduced.

      The native sidecar containers are available in alpha. Kubernetes 1.28 adds restartPolicy to Init containers. This field is available when the SidecarContainers feature gate is enabled. However, there are still some problems to be solved in the native sidecar containers. Therefore, the Kubernetes community recommends only using this feature gate in short lived testing clusters at the alpha phase. For details, see Introducing native sidecar containers.

      +

      The PV controller has been modified to automatically assign a default StorageClass to any unbound PVC with storageClassName not configured. Additionally, the PVC admission validation mechanism within the API server has been adjusted to allow changing values from an unset state to an actual StorageClass name. For details, see Retroactive default StorageClass assignment.

      +
    • Native sidecar containers are introduced.

      The native sidecar containers are available in alpha. Kubernetes 1.28 adds restartPolicy to Init containers. This field is available when the SidecarContainers feature gate is enabled. However, there are still some problems to be solved in the native sidecar containers. Therefore, the Kubernetes community recommends using this feature gate only in short-lived test clusters at the alpha phase. For details, see Introducing native sidecar containers.

    • Mixed version proxy is introduced.

      A new mechanism (mixed version proxy) is released to improve cluster upgrade. It is an alpha feature in Kubernetes 1.28. When a cluster undergoes an upgrade, API servers of different versions in the cluster can serve different sets (groups, versions, or resources) of built-in resources. A resource request made in this scenario may be served by any of the available API servers, potentially resulting in the request ending up at an API server that may not be aware of the requested resource. As a result, the request fails. This feature can solve this problem. (Note that CCE provides hitless upgrade. Therefore, this feature is not used in CCE clusters.) For details, see A New (alpha) Mechanism For Safer Cluster Upgrades.

    • Non-graceful node shutdown moves to GA.

      The non-graceful node shutdown is now GA in Kubernetes 1.28. If a node is shut down and that shutdown is not detected by the kubelet's Node Shutdown Manager, the StatefulSet pods that run on this node stay in the terminating state and cannot be moved to a running node. If you have confirmed that the shutdown node is unrecoverable, you can add an out-of-service taint to the node. This ensures that the StatefulSet pods and VolumeAttachments on this node can be forcibly deleted and the corresponding pods are created on a healthy node. For details, see Non-Graceful Node Shutdown Moves to GA.

    • NodeSwap moves to beta.

      Support for NodeSwap goes to beta in Kubernetes 1.28. NodeSwap is disabled by default and can be enabled using the NodeSwap feature gate. NodeSwap allows you to configure swap memory usage for Kubernetes workloads running on Linux on a per-node basis. Note that although NodeSwap has reached beta, there are still some problems to be solved and security risks to be enhanced. For details, see Beta Support for Using Swap on Linux.

      -
    • Two Job-related features are added.

      Two alpha features are introduced: delayed creation of replacement pods and backoff limit per index.

      +
    • Two job-related features are added.

      Two alpha features are introduced: delayed creation of replacement pods and backoff limit per index.

      • Delayed creation of replacement pods

        By default, when a pod enters the terminating state (for example, due to preemption or eviction), Kubernetes immediately creates a replacement pod, so both pods run concurrently.

        -

        In Kubernetes 1.28, this feature can be enabled by turning on the JobPodReplacementPolicy feature gate. With this feature gate enabled, you can set the podReplacementPolicy field under spec of a Job to Failed. In this way, pods would only be replaced when they reached the failed phase, and not when they are terminating. Additionally, you can check the .status.termination field of a job. The value of this field is the number of pods owned by the Job that are currently terminating.

        -
      • Backoff limit per index

        By default, pod failures for indexed jobs are recorded and restricted by the global limit of retries, specified by .spec.backoffLimit. This means that if there is a consistently failing index in a job, pods specified by the job will be restarted repeatedly until pod failures exhaust the limit. Once the limit is reached, the Job is marked failed and pods for other indexes in the Job may never be even started.

        -

        In Kubernetes 1.28, this feature can be enabled by turning on the JobBackoffLimitPerIndex feature gate of a cluster. With this feature gate enabled, .spec.backoffLimitPerIndex can be specified when an indexed Job is created. Only if the failures of pods with all indexes specified in this Job exceed the upper limit, pods specified by the Job will not be restarted.

        +

        In Kubernetes 1.28, this feature can be enabled by turning on the JobPodReplacementPolicy feature gate. With this feature gate enabled, you can set the podReplacementPolicy field under spec of a job to Failed. In this way, pods are only replaced when they reach the Failed phase, not while they are terminating. Additionally, you can check the .status.terminating field of a job, whose value is the number of pods owned by the job that are currently terminating.

        +
      • Backoff limit per index

        By default, pod failures for indexed jobs are recorded and restricted by the global limit of retries, specified by .spec.backoffLimit. This means that if there is a consistently failing index in a job, the pods specified by the job are restarted repeatedly until the pod failures exhaust the limit. Once the limit is reached, the job is marked failed, and pods for other indexes in the job may never even be started.

        +

        In Kubernetes 1.28, this feature can be enabled by turning on the JobBackoffLimitPerIndex feature gate of a cluster. With this feature gate enabled, .spec.backoffLimitPerIndex can be specified when an indexed job is created, so that pod failures are counted and limited separately for each index and a consistently failing index does not block the other indexes. A minimal manifest illustrating both job-related features is provided after this list.

      -
    • Some Common Expression Language (CEL) related features are improved.

      CEL related capabilities are enhanced.

      -
      • CEL used to validate CustomResourceDefinitions (CRDs) moves to beta.

        This feature has been upgraded to beta since Kubernetes 1.25. By embedding CEL expressions into CRDs, developers can solve most of the CR validation use cases without using webhooks. More CEL functions, such as support for default value and CRD conversion, will be developed in later Kubernetes versions.

        +
      • Some CEL related features are improved.

        CEL related capabilities are enhanced.

        +
        • CEL used to validate CRDs moves to beta.

          This feature has been upgraded to beta since Kubernetes 1.25. By embedding CEL expressions into CRDs, developers can solve most of the CR validation use cases without using webhooks. More CEL functions, such as support for default value and CRD conversion, will be developed in later Kubernetes versions.

        • CEL admission control graduates to beta.

          CEL admission control is customizable. With CEL expressions, you can decide whether to accept or reject requests received by kube-apiserver. CEL expressions can also serve as a substitute for admission webhooks. Kubernetes 1.28 has upgraded CEL admission control to beta and introduced new functions, such as:

          -
          • ValidatingAdmissionPolicy can correctly handle the authorizer variable.
          • ValidatingAdmissionPolicy can have the messageExpression field checked.
          • The ValidatingAdmissionPolicy controller is added to kube-controller-manager to check the type of the CEL expression in ValidatingAdmissionPolicy and save the reason in the status field.
          • CEL expressions can contain a combination of one or more variables, which can be defined in ValidatingAdmissionPolicy. These variables can be used to define other variables.
          • CEL library functions can be used to parse resources specified by resource.Quantity in Kubernetes.
          +
          • ValidatingAdmissionPolicy can correctly handle the authorizer variable.
          • ValidatingAdmissionPolicy can have the messageExpression field checked.
          • The ValidatingAdmissionPolicy controller is added to kube-controller-manager to check the type of the CEL expression in ValidatingAdmissionPolicy and save the reason in the status field.
          • CEL expressions can contain a combination of one or more variables, which can be defined in ValidatingAdmissionPolicy. These variables can be used to define other variables.
          • CEL library functions can be used to parse resources specified by resource.Quantity in Kubernetes.
      • Other features
        • The ServiceNodePortStaticSubrange feature gate moves to beta. With this feature enabled, static port range can be reserved to avoid conflicts with dynamically allocated ports. For details, see Avoiding Collisions Assigning Ports to NodePort Services.
        • The alpha feature ConsistentListFromCache is added to allow the API server to serve consistent lists from cache. Get and list requests can read data from the cache instead of etcd.
        • In Kubernetes 1.28, the kubelet drop-in configuration directory is available (alpha). This feature adds the --config-dir flag to kubelet so that you can specify a drop-in directory that overrides the kubelet configuration in /etc/kubernetes/kubelet.conf.
        • ExpandedDNSConfig moves to GA and is enabled by default. With this feature enabled, DNS configurations can be expanded.
        • The alpha feature CRDValidationRatcheting is added. This feature allows CRs with failing validations to pass if a Patch or Update request does not alter any of the invalid fields.
        • --concurrent-cron-job-syncs is added to kube-controller-manager to configure the number of workers for the cron job controller.
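    The following is a minimal sketch of an indexed job that uses both job-related alpha features described above. It assumes a Kubernetes 1.28 cluster with the JobPodReplacementPolicy and JobBackoffLimitPerIndex feature gates enabled; the name, image, and values are illustrative.

      apiVersion: batch/v1
      kind: Job
      metadata:
        name: indexed-job-demo            # Example name.
      spec:
        completions: 4
        parallelism: 4
        completionMode: Indexed           # Required for per-index backoff limits.
        podReplacementPolicy: Failed      # Replace a pod only after it reaches the Failed phase.
        backoffLimitPerIndex: 2           # Allow up to 2 pod failures for each index.
        maxFailedIndexes: 2               # Optional: mark the job as failed once 2 indexes have failed.
        template:
          spec:
            restartPolicy: Never
            containers:
            - name: worker
              image: busybox:latest       # Example image.
              command: ["sh", "-c", "echo processing index $JOB_COMPLETION_INDEX"]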
    -

    API Changes and Removals

    • NetworkPolicyStatus is removed. There is no status attribute in a network policy.
    • The annotation batch.kubernetes.io/cronJob-scheduled-timestamp is added to Job objects to indicate the creation time of a Job.
    • The podReplacementPolicy and terminating fields are added to Job APIs. With these fields specified, once a previously created pod is terminated in a Job, the Job immediately starts a new pod to replace the pod. The new fields allow you to specify whether to replace the pod immediately after the previous pod is terminated (original behavior) or replace the pod after the existing pod is completely terminated (new behavior). This is an alpha feature, and you can enable it by turning on the JobPodReplacementPolicy feature gate in your cluster.
    • The BackoffLimitPerIndex field is available in a Job. Pods specified by a Job share a backoff mechanism. When backoff times of the Job reach the limit, this Job is marked as failed and resources, including indexes that are not running, are cleared up. This field allows you to configure backoff limit for a single index. For details, see Backoff limit per index.
    • The ServedVersions field is added to the StorageVersion API. This change is introduced by mixed version proxy. The new field is used to indicate a version that can be provided by the API server.
    • SelfSubjectReview is added to authentication.k8s.io/v1, and kubectl auth whoami goes to GA.
    • LastPhaseTransitionTime is added to PersistentVolume. The new field is used to store the last time when a volume changes to a different phase.
    • resizeStatus in PVC.Status is replaced by AllocatedResourceStatus. The new field indicates the statuses of the storage resize operation. The default value is an empty string.
    • If hostNetwork is set to true and ports are specified for a pod, the hostport field will be automatically configured.
    • StatefulSet pods have the pod index set as a pod label statefulset.kubernetes.io/pod-index.
    • PodHasNetwork in the Condition field of pods has been renamed to PodReadyToStartContainers. The new field specifies that containers are ready to start after the network, volumes, and sandbox pod have been created.
    • A new configuration option delayCacheUntilActive is added to KubeSchedulerConfiguration. If delayCacheUntilActive is set to true, kube-scheduler on the leader will not cache scheduling information. This reduces the memory pressure of other master nodes, but slows down the failover speed after the leader failed.
    • The namespaceParamRef field is added to admissionregistration.k8s.io/v1alpha1.ValidatingAdmissionPolicy.
    • The reason and fieldPath fields are added to CRD validation rules to allow you to specify reason and field path after verification failed.
    • The CEL expression of ValidatingAdmissionPolicy supports namespace access via namespaceObject.
    • API groups ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding are promoted to v1beta1.
    • A ValidatingAdmissionPolicy now has its messageExpression field checked against resolved types.
    +

    API Changes and Removals

    • NetworkPolicyStatus is removed. There is no status attribute in a network policy.
    • The annotation batch.kubernetes.io/cronJob-scheduled-timestamp is added to job objects to indicate the creation time of a job.
    • The podReplacementPolicy and terminating fields are added to job APIs. With these fields specified, once a previously created pod is terminated in a job, the job immediately starts a new pod to replace the pod. The new fields allow you to specify whether to replace the pod immediately after the previous pod is terminated (original behavior) or replace the pod after the existing pod is completely terminated (new behavior). This is an alpha feature, and you can enable it by turning on the JobPodReplacementPolicy feature gate in your cluster.
    • The BackoffLimitPerIndex field is available in a job. Pods specified by a job share a backoff mechanism. When backoff times of the job reach the limit, this job is marked as failed and resources, including indexes that are not running, are cleared up. This field allows you to configure backoff limit for a single index. For details, see Backoff limit per index.
    • The ServedVersions field is added to the StorageVersion API. This change is introduced by mixed version proxy. The new field is used to indicate a version that can be provided by the API server.
    • SelfSubjectReview is added to authentication.k8s.io/v1, and kubectl auth whoami goes to GA.
    • LastPhaseTransitionTime is added to PersistentVolume. The new field is used to store the last time when a volume changes to a different phase.
    • resizeStatus in PVC.Status is replaced by AllocatedResourceStatus. The new field indicates the statuses of the storage resize operation. The default value is an empty string.
    • If hostNetwork is set to true and ports are specified for a pod, the hostport field will be automatically configured.
    • StatefulSet pods have the pod index set as a pod label statefulset.kubernetes.io/pod-index.
    • PodHasNetwork in the Condition field of pods has been renamed to PodReadyToStartContainers. The new field specifies that containers are ready to start after the network, volumes, and sandbox pod have been created.
    • A new configuration option delayCacheUntilActive is added to KubeSchedulerConfiguration. If delayCacheUntilActive is set to true, kube-scheduler on the leader will not cache scheduling information. This reduces the memory pressure of other master nodes, but slows down the failover speed after the leader failed.
    • The namespaceParamRef field is added to admissionregistration.k8s.io/v1alpha1.ValidatingAdmissionPolicy.
    • The reason and fieldPath fields are added to CRD validation rules to allow you to specify reason and field path after verification failed.
    • The CEL expression of ValidatingAdmissionPolicy supports namespace access via namespaceObject.
    • API groups ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding are promoted to v1beta1 (see the sketch after this list).
    • A ValidatingAdmissionPolicy now has its messageExpression field checked against resolved types.
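    The following is a minimal sketch of a ValidatingAdmissionPolicy and its binding using the v1beta1 API, rejecting Deployments that request more than five replicas. The policy name and rule are illustrative and are not taken from this document.

      apiVersion: admissionregistration.k8s.io/v1beta1
      kind: ValidatingAdmissionPolicy
      metadata:
        name: demo-replica-limit                    # Example name.
      spec:
        failurePolicy: Fail
        matchConstraints:
          resourceRules:
          - apiGroups: ["apps"]
            apiVersions: ["v1"]
            operations: ["CREATE", "UPDATE"]
            resources: ["deployments"]
        validations:
        - expression: "!has(object.spec.replicas) || object.spec.replicas <= 5"   # CEL expression evaluated by kube-apiserver.
          message: "Deployments must not exceed 5 replicas."
      ---
      apiVersion: admissionregistration.k8s.io/v1beta1
      kind: ValidatingAdmissionPolicyBinding
      metadata:
        name: demo-replica-limit-binding            # Example name.
      spec:
        policyName: demo-replica-limit
        validationActions: ["Deny"]                 # Reject requests that fail the validation.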
    -

    Feature Gate and Command Line Parameter Changes and Removals

    • --short is removed from kubectl. Therefore, the default output of kubectl version is the same as that of kubectl version --short.
    • --volume-host-cidr-denylist and --volume-host-allow-local-loopback are removed from kube-controller-manager. --volume-host-cidr-denylist is a comma-separated list of CIDR ranges. Volume plugins at these IP addresses are not allowed. If --volume-host-allow-local-loopback is set to false, the local loopback IP address and the CIDR ranges specified in --volume-host-cidr-denylist are disabled.
    • --azure-container-registry-config is deprecated in kubelet and will be deleted in later Kubernetes versions. Use --image-credential-provider-config and --image-credential-provider-bin-dir instead.
    • --lock-object-namespace and --lock-object-name are removed from kube-scheduler. Use --leader-elect-resource-namespace and --leader-elect-resource-name or ComponentConfig instead. (--lock-object-namespace is used to define the namespace of a lock object, and --lock-object-name is used to define the name of a lock object.)
    • KMS v1 is deprecated and will only receive security updates. Use KMS v2 instead. In later Kubernetes versions, use --feature-gates=KMSv1=true to configure a KMS v1 provider.
    • The DelegateFSGroupToCSIDriver, DevicePlugins, KubeletCredentialProviders, MixedProtocolLBService, ServiceInternalTrafficPolicy, ServiceIPStaticSubrange, and EndpointSliceTerminatingCondition feature gates are removed.
    +

    Feature Gate and Command Line Parameter Changes and Removals

    • --short is removed from kubectl. Therefore, the default output of kubectl version is the same as that of kubectl version --short.
    • --volume-host-cidr-denylist and --volume-host-allow-local-loopback are removed from kube-controller-manager. --volume-host-cidr-denylist is a comma-separated list of CIDR ranges. Volume plugins at these IP addresses are not allowed. If --volume-host-allow-local-loopback is set to false, the local loopback IP address and the CIDR ranges specified in --volume-host-cidr-denylist are disabled.
    • --azure-container-registry-config is deprecated in kubelet and will be deleted in later Kubernetes versions. Use --image-credential-provider-config and --image-credential-provider-bin-dir instead.
    • --lock-object-namespace and --lock-object-name are removed from kube-scheduler. Use --leader-elect-resource-namespace and --leader-elect-resource-name or ComponentConfig instead. (--lock-object-namespace is used to define the namespace of a lock object, and --lock-object-name is used to define the name of a lock object.)
    • KMS v1 is deprecated and will only receive security updates. Use KMS v2 instead. In later Kubernetes versions, use --feature-gates=KMSv1=true to configure a KMS v1 provider.
    • The DelegateFSGroupToCSIDriver, DevicePlugins, KubeletCredentialProviders, MixedProtocolLBService, ServiceInternalTrafficPolicy, ServiceIPStaticSubrange, and EndpointSliceTerminatingCondition feature gates are removed.

    Enhanced Kubernetes 1.28 on CCE

    During a version maintenance period, CCE periodically updates Kubernetes 1.28 and provides enhanced functions.

    For details about cluster version updates, see Release Notes for CCE Cluster Versions.

    diff --git a/docs/cce/umn/cce_bulletin_0089.html b/docs/cce/umn/cce_bulletin_0089.html index effb5a6a..89325a15 100644 --- a/docs/cce/umn/cce_bulletin_0089.html +++ b/docs/cce/umn/cce_bulletin_0089.html @@ -13,20 +13,20 @@
  • The clusterTrustBundle projected volumes are in the alpha state.

    The clusterTrustBundle projected volumes are promoted to alpha. With this feature enabled, the clusterTrustBundle projected volume source injects the contents of one or more ClusterTrustBundle objects as an automatically-updating file. For details, see clusterTrustBundle projected volumes.

  • Pulling images based on runtime classes of pods is in the alpha state.

    Pulling images based on runtime classes is promoted to alpha. With this feature enabled, the kubelet references container images by a tuple of image name and runtime handler rather than just the image name or digest. Your container runtime may adapt its behavior based on the selected runtime handler. Pulling images based on runtime classes is helpful for VM-based containers. For details, see Image pull per runtime class.

  • The PodReadyToStartContainers condition is in the beta state.

    The PodReadyToStartContainers condition is promoted to beta. Kubernetes 1.29 introduces the PodReadyToStartContainers condition to the pods' status field. If it is set to true, the sandbox of a pod is ready and service containers can be created. This feature enables cluster administrators to gain a clearer and more comprehensive view of pod sandbox creation completion and container readiness. This enhanced visibility allows them to make better-informed decisions and troubleshoot issues more effectively. For details, see PodReadyToStartContainers Condition Moves to Beta.

    -
  • Two Job-related features are in the beta state.
    • Pod replacement policy (beta)

      The pod replacement policy feature moves to beta. This feature ensures that a pod is replaced only when it reaches the Failed state, which means that status.phase becomes Failed. It does not recreate a pod when the deletion timestamp is not empty and the pod is still being deleted. This prevents two pods from occupying index and node resources concurrently.

      -
    • Backoff limit per index (beta)

      The backoff limit per index moves to beta. By default, pod failures for indexed jobs are counted and restricted by the global limit of retries, specified by .spec.backoffLimit. This means that if there is a consistently failing index in a job, pods specified by the job will be restarted repeatedly until pod failures exhaust the limit. Once the limit is reached, the job is marked failed and pods for other indexes in the job may never be even started. The feature allows you to complete execution of all indexes, despite some indexes failing, and to better use the computing resources by avoiding unnecessary retries of consistently failing indexes.

      +
    • Two job-related features are in the beta state.
      • Pod replacement policy (beta)

        The pod replacement policy feature moves to beta. This feature ensures that a pod is replaced only when it reaches the Failed state, which means that status.phase becomes Failed. It does not recreate a pod when the deletion timestamp is not empty and the pod is still being deleted. This prevents two pods from occupying index and node resources concurrently.

        +
      • Backoff limit per index (beta)

        The backoff limit per index moves to beta. By default, pod failures for indexed jobs are counted and restricted by the global limit of retries, specified by .spec.backoffLimit. This means that if there is a consistently failing index in a job, pods specified by the job will be restarted repeatedly until pod failures exhaust the limit. Once the limit is reached, the job is marked failed and pods for other indexes in the job may never be even started. The feature allows you to complete execution of all indexes, despite some indexes failing, and to better use the compute resources by avoiding unnecessary retries of consistently failing indexes.

    • Native sidecar containers are in the beta state.

      Native sidecar containers are promoted to beta. The restartPolicy field is added to initContainers. When this field is set to Always, the init container runs as a sidecar container. Sidecar containers run in the same pod as the service containers and do not prolong the pod lifecycle. Sidecar containers are commonly used in scenarios such as network proxy and log collection (a minimal sketch follows this list). For details, see Sidecar Containers.

    • The legacy ServiceAccount token cleaner is in the beta state.

      Legacy ServiceAccount token cleaner is promoted to beta. It runs as part of kube-controller-manager and checks every 24 hours to see if any auto-generated legacy ServiceAccount token has not been used in a specific amount of time (one year by default, specified by --legacy-service-account-token-clean-up-period). If so, the cleaner marks those tokens as invalid and adds the kubernetes.io/legacy-token-invalid-since label whose value is the current date. If an invalid token is not used for a specific period of time (one year by default, specified by --legacy-service-account-token-clean-up-period), the cleaner deletes it. For details, see Legacy ServiceAccount Token Cleaner.

    • DevicePluginCDIDevices is in the beta state.

      DevicePluginCDIDevices moves to beta. With this feature enabled, plugin developers can use the CDIDevices field added to DeviceRunContainerOptions to pass CDI device names directly to CDI enabled runtimes.

    • PodHostIPs is in the beta state.

      The PodHostIPs feature moves to beta. With this feature enabled, Kubernetes adds the hostIPs field to Status of pods and downward API to expose node IP addresses to workloads. This field specifies the dual-stack protocol version of the host IP address. The first IP address is always the same as the host IP address.

      -
    • The API Priority and Fairness feature (APF) is in the General Availability (GA) state.

      APF moves to GA. APF classifies and isolates requests in a more fine-grained way. It improves max-inflight limitations. It also introduces a limited amount of queuing, so that the API server does not reject any request in cases of very brief bursts. Requests are dispatched from queues using a fair queuing technique so that, for example, a poorly-behaved controller does not cause others (even at the same priority level) to become abnormal. For details, see API Priority and Fairness.

      +
    • The API Priority and Fairness feature (APF) is in the GA state.

      APF moves to GA. APF classifies and isolates requests in a more fine-grained way. It improves max-inflight limitations. It also introduces a limited amount of queuing, so that the API server does not reject any request in cases of very brief bursts. Requests are dispatched from queues using a fair queuing technique so that, for example, a poorly-behaved controller does not cause others (even at the same priority level) to become abnormal. For details, see API Priority and Fairness.

    • APIListChunking is in the GA state.

      The APIListChunking feature moves to GA. This feature allows clients to perform pagination in List requests to avoid performance problems caused by returning too much data at a time.

    • ServiceNodePortStaticSubrange is in the GA state.

      The ServiceNodePortStaticSubrange feature moves to GA. With this feature enabled, the node port range is divided into a static band and a dynamic band, and the size of the reserved static band is calculated from the port range of the NodePort Services. During automatic node port assignment, ports are preferentially allocated from the dynamic band, which helps avoid conflicts with ports assigned from the static band. For details, see ServiceNodePortStaticSubrange.

    • The phase transition timestamp of PersistentVolume (PV) is in the beta state.

      The PV phase transition timestamp moves to beta. With this feature enabled, Kubernetes adds the lastPhaseTransitionTime field to the status field of a PV to indicate the time when the PV phase changes last time. Cluster administrators are now able to track the last time a PV transitioned to a different phase, allowing for more efficient and informed resource management. For details, see PersistentVolume Last Phase Transition Time in Kubernetes.

    • ReadWriteOncePod is in the GA state.

      The ReadWriteOncePod feature moves to GA. With this feature enabled, you can set the access mode to ReadWriteOncePod in a PersistentVolumeClaim (PVC) to ensure that only one pod can modify data in the volume at a time. This can prevent data conflicts or damage. For details, see ReadWriteOncePod.

    • CSINodeExpandSecret is in the GA state.

      The CSINodeExpandSecret feature moves to GA. This feature allows secret authentication data to be passed to a CSI driver for use during node-side volume expansion.

      -
    • The Common Expression Language (CEL)-based CRD verification capability is in the GA state.

      The CEL-based CRD verification capability moves to GA. With this feature enabled, you are allowed to use the CEL to define validation rules in CRDs, which are more efficient than webhook. For details, see CRD verification rules.

      +
    • The CEL-based CustomResourceDefinition (CRD) verification capability is in the GA state.

      The CEL-based CRD verification capability moves to GA. With this feature enabled, you can use CEL to define validation rules in CRDs, which is more efficient than using webhooks. For details, see CRD verification rules.

  • API Changes and Removals

    • The time zone of a newly created cron job cannot be configured using TZ or CRON_TZ in .spec.schedule. Use .spec.timeZone instead. Cron jobs that have been created are not affected by this change.
    • The alpha API ClusterCIDR is removed.
    • The startup parameter --authentication-config is added to kube-apiserver to specify the address of the AuthenticationConfiguration file. This startup parameter is mutually exclusive with the --oidc-* startup parameter.
    • The API version kubescheduler.config.k8s.io/v1beta3 of KubeSchedulerConfiguration is removed. Migrate kube-scheduler configuration files to kubescheduler.config.k8s.io/v1.
    • The CEL expressions are added to v1alpha1 AuthenticationConfiguration.
    • The ServiceCIDR type is added. It allows you to dynamically configure the IP address range used by a cluster to allocate the Service ClusterIPs.
    • The startup parameters --conntrack-udp-timeout and --conntrack-udp-timeout-stream are added to kube-proxy. They are options for configuring the kernel parameters nf_conntrack_udp_timeout and nf_conntrack_udp_timeout_stream.
    • Support for CEL expressions is added to WebhookMatchCondition of v1alpha1 AuthenticationConfiguration.
    • The type of PVC.spec.Resource is changed from ResourceRequirements to VolumeResourceRequirements.
    • onPodConditions in PodFailurePolicyRule is marked as optional.
    • The API version flowcontrol.apiserver.k8s.io/v1beta3 of FlowSchema and PriorityLevelConfiguration has been promoted to flowcontrol.apiserver.k8s.io/v1, and the following changes have been made:
      • PriorityLevelConfiguration: The .spec.limited.nominalConcurrencyShares field defaults to 30 if the field is omitted. To ensure compatibility with 1.28 API servers, specifying an explicit 0 value is not allowed in the v1 version in 1.29. In 1.30, explicit 0 values will be allowed in this field in the v1 API. The flowcontrol.apiserver.k8s.io/v1beta3 APIs are deprecated and will no longer be served in 1.32.
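    The following is a minimal sketch of the native sidecar containers described above: a Deployment whose pod template runs a log-collection sidecar as a restartable init container. The names and images are placeholders.

      apiVersion: apps/v1
      kind: Deployment
      metadata:
        name: app-with-sidecar-demo        # Example name.
      spec:
        replicas: 1
        selector:
          matchLabels:
            app: sidecar-demo
        template:
          metadata:
            labels:
              app: sidecar-demo
          spec:
            initContainers:
            - name: log-agent              # Runs as a native sidecar container.
              image: fluent-bit:latest     # Placeholder image.
              restartPolicy: Always        # Always on an init container makes it a sidecar.
            containers:
            - name: app
              image: nginx:alpine          # Placeholder image.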
      diff --git a/docs/cce/umn/cce_faq_00097.html b/docs/cce/umn/cce_faq_00097.html index 7daac650..e8d8481f 100644 --- a/docs/cce/umn/cce_faq_00097.html +++ b/docs/cce/umn/cce_faq_00097.html @@ -5,7 +5,7 @@
      • While an ECS is being accepted into a cluster, the operating system of the ECS will be reset to the standard OS image provided by CCE to ensure node stability. The CCE console prompts you to select the operating system and the login mode during the reset.
      • The ECS system and data disks will be formatted while the ECS is being accepted into a cluster. Ensure that data in the disks has been backed up.
      • While an ECS is being accepted into a cluster, do not perform any operation on the ECS through the ECS console.
    -

    Notes and Constraints

    • ECSs and BMSs can be managed.
    +

    Notes and Constraints

    • ECSs can be managed.

    Prerequisites

    The cloud servers to be managed must meet the following requirements:

    • The node to be accepted must be in the Running state and not used by other clusters. In addition, the node to be accepted does not carry the CCE-Dynamic-Provisioning-Node tag.
    • The node to be accepted and the cluster must be in the same VPC. (If the cluster version is earlier than v1.13.10, the node to be accepted and the CCE cluster must be in the same subnet.)
    • Data disks must be attached to the nodes to be managed. A local disk (disk-intensive disk) or a data disk of at least 20 GiB can be attached to the node, and any data disks already attached cannot be smaller than 10 GiB.
    • The node to be accepted must have at least 2 CPU cores, at least 4 GiB of memory, and only one NIC.
    • Only cloud servers with the same data disk configurations can be added in batches.
    • If IPv6 is enabled for a cluster, only nodes in a subnet with IPv6 enabled can be accepted and managed. If IPv6 is not enabled for the cluster, only nodes in a subnet without IPv6 enabled can be accepted.
    • Nodes in a CCE Turbo cluster must support sub-ENIs or be bound to at least 16 ENIs. For details about the node flavors, see the node flavors that can be selected on the console when you create a node.
    • Data disks that have been partitioned will be ignored during node management. Ensure that at least one unpartitioned data disk meeting the specifications is attached to the node.
    diff --git a/docs/cce/umn/cce_faq_00098.html b/docs/cce/umn/cce_faq_00098.html index 6f921cff..6f9ad4bb 100644 --- a/docs/cce/umn/cce_faq_00098.html +++ b/docs/cce/umn/cce_faq_00098.html @@ -5,7 +5,7 @@

    Troubleshooting Process

    Determine the cause based on the event information, as listed in Table 1.

    -
    Table 1 Pod scheduling failure

    Event Information

    +
    diff --git a/docs/cce/umn/cce_faq_00154.html b/docs/cce/umn/cce_faq_00154.html index 44f125c6..7f3bfd68 100644 --- a/docs/cce/umn/cce_faq_00154.html +++ b/docs/cce/umn/cce_faq_00154.html @@ -2,7 +2,7 @@

    Which Resource Quotas Should I Pay Attention To When Using CCE?

    CCE restricts only the number of clusters. However, when using CCE, you may also be using other cloud services, such as Elastic Cloud Server (ECS), Elastic Volume Service (EVS), Virtual Private Cloud (VPC), Elastic Load Balance (ELB), and SoftWare Repository for Containers (SWR).

    -

    What Is Quota?

    Quotas can limit the number or amount of resources available to users, such as the maximum number of ECSs or EVS disks that can be created.

    +

    What Is Quota?

    Quotas can limit the number or amount of resources available to users, such as the maximum number of ECSs or EVS disks that can be created.

    If the existing resource quota cannot meet your service requirements, you can apply for a higher quota.

    diff --git a/docs/cce/umn/cce_faq_00215.html b/docs/cce/umn/cce_faq_00215.html index a6af96a1..2fb7d852 100644 --- a/docs/cce/umn/cce_faq_00215.html +++ b/docs/cce/umn/cce_faq_00215.html @@ -8,6 +8,8 @@ +
    Table 1 Pod scheduling failure

    Event

    Cause and Solution

    diff --git a/docs/cce/umn/cce_whsnew_0007.html b/docs/cce/umn/cce_whsnew_0007.html index cf7d5c65..40dafbe3 100644 --- a/docs/cce/umn/cce_whsnew_0007.html +++ b/docs/cce/umn/cce_whsnew_0007.html @@ -1,7 +1,7 @@

    Kubernetes 1.17 (EOM) Release Notes

    -

    CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. This section describes the updates in CCE Kubernetes 1.17.

    +

    This section describes the updates in CCE Kubernetes 1.17.

    Resource Changes and Deprecations

    • All resources in the apps/v1beta1 and apps/v1beta2 API versions are no longer served. Migrate to use the apps/v1 API version.
    • DaemonSets, Deployments, and ReplicaSets in the extensions/v1beta1 API version are no longer served. Use the apps/v1 API version instead (see the sketch after this list).
    • NetworkPolicies in the extensions/v1beta1 API version are no longer served. Migrate to use the networking.k8s.io/v1 API version.
    • PodSecurityPolicies in the extensions/v1beta1 API version are no longer served. Migrate to use the policy/v1beta1 API version.
    • Ingresses in the extensions/v1beta1 API version will no longer be served in v1.20. Migrate to use the networking.k8s.io/v1beta1 API version.
    • PriorityClass in the scheduling.k8s.io/v1beta1 and scheduling.k8s.io/v1alpha1 API versions is no longer served in v1.17. Migrate to use the scheduling.k8s.io/v1 API version.
    • The event series.state field in the events.k8s.io/v1beta1 API version has been deprecated and will be removed from v1.18.
    • CustomResourceDefinition in the apiextensions.k8s.io/v1beta1 API version has been deprecated and will no longer be served in v1.19. Use the apiextensions.k8s.io/v1 API version.
    • MutatingWebhookConfiguration and ValidatingWebhookConfiguration in the admissionregistration.k8s.io/v1beta1 API version have been deprecated and will no longer be served in v1.19. You can use the admissionregistration.k8s.io/v1 API version.
    • The rbac.authorization.k8s.io/v1alpha1 and rbac.authorization.k8s.io/v1beta1 API versions have been deprecated and will no longer be served in v1.20. Use the rbac.authorization.k8s.io/v1 API version.
    • The CSINode object of storage.k8s.io/v1beta1 has been deprecated and will be removed in later versions.
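    For example, migrating a Deployment from the removed extensions/v1beta1 API version to apps/v1 only requires changing the apiVersion and adding the selector field, which is mandatory in apps/v1. A minimal sketch with illustrative names:

      # Before (no longer served): apiVersion: extensions/v1beta1
      apiVersion: apps/v1
      kind: Deployment
      metadata:
        name: nginx-deployment        # Example name.
      spec:
        replicas: 2
        selector:                     # Mandatory in apps/v1.
          matchLabels:
            app: nginx
        template:
          metadata:
            labels:
              app: nginx
          spec:
            containers:
            - name: nginx
              image: nginx:alpine     # Example image.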

    Other Deprecations and Removals

    • OutOfDisk node condition is removed in favor of DiskPressure.
    • The scheduler.alpha.kubernetes.io/critical-pod annotation is removed in favor of priorityClassName.
    • beta.kubernetes.io/os and beta.kubernetes.io/arch have been deprecated in v1.14 and will be removed in v1.18.
    • Do not use --node-labels to set labels prefixed with kubernetes.io and k8s.io. The kubernetes.io/availablezone label in earlier versions is removed in v1.17 and changed to failure-domain.beta.kubernetes.io/zone.
    • The beta.kubernetes.io/instance-type is deprecated in favor of node.kubernetes.io/instance-type.
    • Remove the {kubelet_root_dir}/plugins path.
    • Remove the built-in cluster roles system:csi-external-provisioner and system:csi-external-attacher.
    diff --git a/docs/cce/umn/cce_whsnew_0010.html b/docs/cce/umn/cce_whsnew_0010.html index 7cb647f4..a84219f3 100644 --- a/docs/cce/umn/cce_whsnew_0010.html +++ b/docs/cce/umn/cce_whsnew_0010.html @@ -1,7 +1,7 @@

    Kubernetes 1.19 (EOM) Release Notes

    -

    CCE has passed the Certified Kubernetes Conformance Program and is a certified Kubernetes offering. This section describes the updates in CCE Kubernetes 1.19.

    +

    This section describes the updates in CCE Kubernetes 1.19.

    Resource Changes and Deprecations

    Kubernetes v1.19 Release Notes

    • vSphere in-tree volumes can be migrated to vSphere CSI drivers. The in-tree vSphere Volume plugin is no longer used and will be deleted in later versions.
    • apiextensions.k8s.io/v1beta1 has been deprecated. You are advised to use apiextensions.k8s.io/v1.
    • apiregistration.k8s.io/v1beta1 has been deprecated. You are advised to use apiregistration.k8s.io/v1.
    • authentication.k8s.io/v1beta1 and authorization.k8s.io/v1beta1 have been deprecated and will be removed from Kubernetes 1.22. You are advised to use authentication.k8s.io/v1 and authorization.k8s.io/v1.
    • autoscaling/v2beta1 has been deprecated. You are advised to use autoscaling/v2beta2.
    • coordination.k8s.io/v1beta1 has been deprecated in Kubernetes 1.19 and will be removed from version 1.22. You are advised to use coordination.k8s.io/v1.
    • kube-apiserver: The componentstatus API has been deprecated.
    • kubeadm: The kubeadm config view command has been deprecated and will be deleted in later versions. Use kubectl get cm -o yaml -n kube-system kubeadm-config to directly obtain the kubeadm configuration.
    • kubeadm: The kubeadm alpha kubelet config enable-dynamic command has been deprecated.
    • kubeadm: The --use-api flag in the kubeadm alpha certs renew command has been deprecated.
    • Kubernetes no longer supports hyperkube image creation.
    • The --export flag is removed from the kubectl get command.
    • The alpha feature ResourceLimitsPriorityFunction has been deleted.
    • storage.k8s.io/v1beta1 has been deprecated. You are advised to use storage.k8s.io/v1.

    Kubernetes v1.18 Release Notes

    diff --git a/docs/cce/umn/en-us_image_0000001187249376.png b/docs/cce/umn/en-us_image_0000001187249376.png new file mode 100644 index 00000000..582b1618 Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001187249376.png differ diff --git a/docs/cce/umn/en-us_image_0000001619094530.png b/docs/cce/umn/en-us_image_0000001619094530.png new file mode 100644 index 00000000..5ab58faf Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001619094530.png differ diff --git a/docs/cce/umn/en-us_image_0000001667694873.png b/docs/cce/umn/en-us_image_0000001667694873.png new file mode 100644 index 00000000..d3b0f698 Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001667694873.png differ diff --git a/docs/cce/umn/en-us_image_0000001667734001.png b/docs/cce/umn/en-us_image_0000001667734001.png new file mode 100644 index 00000000..6b11888b Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001667734001.png differ diff --git a/docs/cce/umn/en-us_image_0000001696838318.png b/docs/cce/umn/en-us_image_0000001696838318.png deleted file mode 100644 index e4fcbfa9..00000000 Binary files a/docs/cce/umn/en-us_image_0000001696838318.png and /dev/null differ diff --git a/docs/cce/umn/en-us_image_0000001942942816.png b/docs/cce/umn/en-us_image_0000001942942816.png new file mode 100644 index 00000000..31b212be Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001942942816.png differ diff --git a/docs/cce/umn/en-us_image_0000001981275653.png b/docs/cce/umn/en-us_image_0000001981275653.png new file mode 100644 index 00000000..dfffc336 Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001981275653.png differ diff --git a/docs/cce/umn/en-us_image_0000001981275657.png b/docs/cce/umn/en-us_image_0000001981275657.png new file mode 100644 index 00000000..4b0e3796 Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001981275657.png differ diff --git a/docs/cce/umn/en-us_image_0000001981276949.png b/docs/cce/umn/en-us_image_0000001981276949.png new file mode 100644 index 00000000..763c4938 Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001981276949.png differ diff --git a/docs/cce/umn/en-us_image_0000001981435505.png b/docs/cce/umn/en-us_image_0000001981435505.png new file mode 100644 index 00000000..4910f4f4 Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001981435505.png differ diff --git a/docs/cce/umn/en-us_image_0000001981436185.png b/docs/cce/umn/en-us_image_0000001981436185.png new file mode 100644 index 00000000..0677b34e Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001981436185.png differ diff --git a/docs/cce/umn/en-us_image_0000001981436501.png b/docs/cce/umn/en-us_image_0000001981436501.png new file mode 100644 index 00000000..d830d626 Binary files /dev/null and b/docs/cce/umn/en-us_image_0000001981436501.png differ
    Table 1 Relationships between CCE and other services

    Service