diff --git a/cmd/kops/create_cluster.go b/cmd/kops/create_cluster.go index 164005a0d87e3..f62e3b7b953e7 100644 --- a/cmd/kops/create_cluster.go +++ b/cmd/kops/create_cluster.go @@ -325,7 +325,7 @@ func NewCmdCreateCluster(f *util.Factory, out io.Writer) *cobra.Command { cmd.Flags().StringVar(&options.EtcdStorageType, "etcd-storage-type", options.EtcdStorageType, "The default storage type for etcd members") cmd.RegisterFlagCompletionFunc("etcd-storage-type", completeStorageType) - cmd.Flags().StringVar(&options.Networking, "networking", options.Networking, "Networking mode. kubenet, external, flannel-vxlan (or flannel), flannel-udp, calico, kube-router, amazonvpc, cilium, cilium-etcd, kindnet, cni.") + cmd.Flags().StringVar(&options.Networking, "networking", options.Networking, "Networking mode. kubenet, external, flannel-vxlan (or flannel), flannel-udp, calico, kube-router, amazonvpc, cilium, gcp-with-cilium, cilium-etcd, kindnet, cni.") cmd.RegisterFlagCompletionFunc("networking", completeNetworking(options)) cmd.Flags().StringVar(&options.DNSZone, "dns-zone", options.DNSZone, "DNS hosted zone (defaults to longest matching zone)") @@ -1016,7 +1016,7 @@ func completeNetworking(options *CreateClusterOptions) func(cmd *cobra.Command, } if options.CloudProvider == "gce" || options.CloudProvider == "" { - completions = append(completions, "gcp") + completions = append(completions, "gcp", "gcp-with-cilium") } } diff --git a/cmd/kops/create_cluster_integration_test.go b/cmd/kops/create_cluster_integration_test.go index 5f5723da7ec73..c89dd1a20f73e 100644 --- a/cmd/kops/create_cluster_integration_test.go +++ b/cmd/kops/create_cluster_integration_test.go @@ -81,6 +81,11 @@ func TestCreateClusterCilium(t *testing.T) { runCreateClusterIntegrationTest(t, "../../tests/integration/create_cluster/cilium-eni", "v1alpha2") } +// TestCreateClusterCiliumGCE runs kops with the gcp-with-cilium networking flag +func TestCreateClusterCiliumGCE(t *testing.T) { + 
runCreateClusterIntegrationTest(t, "../../tests/integration/create_cluster/cilium-gce", "v1alpha2") +} + // TestCreateClusterOverride tests the override flag func TestCreateClusterOverride(t *testing.T) { runCreateClusterIntegrationTest(t, "../../tests/integration/create_cluster/overrides", "v1alpha2") diff --git a/cmd/kops/integration_test.go b/cmd/kops/integration_test.go index 649df842a3da8..ccc4a8cb317d8 100644 --- a/cmd/kops/integration_test.go +++ b/cmd/kops/integration_test.go @@ -541,6 +541,15 @@ func TestMinimalIPv6Cilium(t *testing.T) { runTestTerraformAWS(t) } +func TestCiliumGCE(t *testing.T) { + newIntegrationTest("cilium-gce.example.com", "cilium-gce"). + withAddons( + ciliumAddon, + dnsControllerAddon, + gcpCCMAddon). + runTestTerraformGCE(t) +} + // TestMinimalIPv6NoSubnetPrefix runs the test with "/64#N" subnet notation func TestMinimalIPv6NoSubnetPrefix(t *testing.T) { newIntegrationTest("minimal-ipv6.example.com", "minimal-ipv6-no-subnet-prefix"). diff --git a/docs/cli/kops_create_cluster.md b/docs/cli/kops_create_cluster.md index bed518bbd0605..26320dde70575 100644 --- a/docs/cli/kops_create_cluster.md +++ b/docs/cli/kops_create_cluster.md @@ -100,7 +100,7 @@ kops create cluster [CLUSTER] [flags] --kubernetes-version string Version of Kubernetes to run (defaults to version in channel) --network-cidr strings Network CIDR(s) to use --network-id string Shared Network or VPC to use - --networking string Networking mode. kubenet, external, flannel-vxlan (or flannel), flannel-udp, calico, kube-router, amazonvpc, cilium, cilium-etcd, kindnet, cni. (default "cilium") + --networking string Networking mode. kubenet, external, flannel-vxlan (or flannel), flannel-udp, calico, kube-router, amazonvpc, cilium, gcp-with-cilium, cilium-etcd, kindnet, cni. (default "cilium") --node-count int32 Total number of worker nodes. Defaults to one node per zone --node-image string Machine image for worker nodes. 
Takes precedence over --image --node-security-groups strings Additional pre-created security groups to add to worker nodes. diff --git a/k8s/crds/kops.k8s.io_clusters.yaml b/k8s/crds/kops.k8s.io_clusters.yaml index d5042a4eaf2a1..0fb3efb15f7bd 100644 --- a/k8s/crds/kops.k8s.io_clusters.yaml +++ b/k8s/crds/kops.k8s.io_clusters.yaml @@ -5948,6 +5948,526 @@ spec: gce: description: GCPNetworkingSpec is the specification of GCP's native networking mode, using IP aliases. + properties: + cilium: + description: Cilium enables Cilium on GCP. + properties: + IPTablesRulesNoinstall: + description: |- + IPTablesRulesNoinstall disables installing the base IPTables rules used for masquerading and kube-proxy. + Default: false + type: boolean + accessLog: + description: AccessLog is unused. + type: string + agentLabels: + description: AgentLabels is unused. + items: + type: string + type: array + agentPodAnnotations: + additionalProperties: + type: string + description: |- + AgentPodAnnotations makes possible to add additional annotations to the cilium agent. + Default: none + type: object + agentPrometheusPort: + description: |- + AgentPrometheusPort is the port to listen to for Prometheus metrics. + Defaults to 9090. + type: integer + allowLocalhost: + description: AllowLocalhost is unused. + type: string + autoDirectNodeRoutes: + description: |- + AutoDirectNodeRoutes adds automatic L2 routing between nodes. + Default: false + type: boolean + autoIpv6NodeRoutes: + description: AutoIpv6NodeRoutes is unused. + type: boolean + bpfCTGlobalAnyMax: + description: |- + BPFCTGlobalAnyMax is the maximum number of entries in the non-TCP CT table. + Default: 262144 + type: integer + bpfCTGlobalTCPMax: + description: |- + BPFCTGlobalTCPMax is the maximum number of entries in the TCP CT table. + Default: 524288 + type: integer + bpfLBAlgorithm: + description: |- + BPFLBAlgorithm is the load balancing algorithm ("random", "maglev"). 
+ Default: random + type: string + bpfLBMaglevTableSize: + description: |- + BPFLBMaglevTableSize is the per service backend table size when going with Maglev (parameter M). + Default: 16381 + type: string + bpfLBMapMax: + description: |- + BPFLBMapMax is the maximum number of entries in bpf lb service, backend and affinity maps. + Default: 65536 + type: integer + bpfLBSockHostNSOnly: + description: |- + BPFLBSockHostNSOnly enables skipping socket LB for services when inside a pod namespace, + in favor of service LB at the pod interface. Socket LB is still used when in the host namespace. + Required by service mesh (e.g., Istio, Linkerd). + Default: false + type: boolean + bpfNATGlobalMax: + description: |- + BPFNATGlobalMax is the the maximum number of entries in the BPF NAT table. + Default: 524288 + type: integer + bpfNeighGlobalMax: + description: |- + BPFNeighGlobalMax is the the maximum number of entries in the BPF Neighbor table. + Default: 524288 + type: integer + bpfPolicyMapMax: + description: |- + BPFPolicyMapMax is the maximum number of entries in endpoint policy map. + Default: 16384 + type: integer + bpfRoot: + description: BPFRoot is unused. + type: string + chainingMode: + description: |- + ChainingMode allows using Cilium in combination with other CNI plugins. + With Cilium CNI chaining, the base network connectivity and IP address management is managed + by the non-Cilium CNI plugin, but Cilium attaches eBPF programs to the network devices created + by the non-Cilium plugin to provide L3/L4 network visibility, policy enforcement and other advanced features. + Default: none + type: string + clusterID: + description: |- + ClusterID is the ID of the cluster. It is only relevant when building a mesh of clusters. + Must be a number between 1 and 255. + type: integer + clusterName: + description: ClusterName is the name of the cluster. It + is only relevant when building a mesh of clusters. 
+ type: string + cniBinPath: + description: CniBinPath is unused. + type: string + cniExclusive: + description: |- + CniExclusive configures whether to remove other CNI configuration files. + Default: true + type: boolean + containerRuntime: + description: ContainerRuntime is unused. + items: + type: string + type: array + containerRuntimeEndpoint: + additionalProperties: + type: string + description: ContainerRuntimeEndpoint is unused. + type: object + containerRuntimeLabels: + description: ContainerRuntimeLabels is unused. + type: string + cpuRequest: + anyOf: + - type: integer + - type: string + description: 'CPURequest CPU request of Cilium agent + + operator container. (default: 25m)' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + debug: + description: Debug runs Cilium in debug mode. + type: boolean + debugVerbose: + description: DebugVerbose is unused. + items: + type: string + type: array + device: + description: Device is unused. + type: string + disableCNPStatusUpdates: + description: DisableCNPStatusUpdates determines if CNP + NodeStatus updates will be sent to the Kubernetes api-server. + type: boolean + disableConntrack: + description: DisableConntrack is unused. + type: boolean + disableEndpointCRD: + description: |- + DisableEndpointCRD disables usage of CiliumEndpoint CRD. + Default: false + type: boolean + disableIpv4: + description: DisableIpv4 is unused. + type: boolean + disableK8sServices: + description: DisableK8sServices is unused. + type: boolean + disableMasquerade: + description: DisableMasquerade disables masquerading traffic + to external destinations behind the node IP. + type: boolean + enableBPFMasquerade: + description: |- + EnableBPFMasquerade enables masquerading packets from endpoints leaving the host with BPF instead of iptables. 
+ Default: false + type: boolean + enableEncryption: + description: |- + EnableEncryption enables Cilium Encryption. + Default: false + type: boolean + enableEndpointHealthChecking: + description: |- + EnableEndpointHealthChecking enables connectivity health checking between virtual endpoints. + Default: true + type: boolean + enableHostReachableServices: + description: |- + EnableHostReachableServices configures Cilium to enable services to be + reached from the host namespace in addition to pod namespaces. + https://docs.cilium.io/en/v1.9/gettingstarted/host-services/ + Default: false + type: boolean + enableL7Proxy: + description: |- + EnableL7Proxy enables L7 proxy for L7 policy enforcement. + Default: true + type: boolean + enableLocalRedirectPolicy: + description: |- + EnableLocalRedirectPolicy that enables pod traffic destined to an IP address and port/protocol + tuple or Kubernetes service to be redirected locally to backend pod(s) within a node, using eBPF. + https://docs.cilium.io/en/stable/network/kubernetes/local-redirect-policy/ + Default: false + type: boolean + enableNodePort: + description: |- + EnableNodePort replaces kube-proxy with Cilium's BPF implementation. + Requires spec.kubeProxy.enabled be set to false. + Default: false + type: boolean + enablePolicy: + description: |- + EnablePolicy specifies the policy enforcement mode. + "default": Follows Kubernetes policy enforcement. + "always": Cilium restricts all traffic if no policy is in place. + "never": Cilium allows all traffic regardless of policies in place. + If unspecified, "default" policy mode will be used. + type: string + enablePrometheusMetrics: + description: EnablePrometheusMetrics enables the Cilium + "/metrics" endpoint for both the agent and the operator. + type: boolean + enableRemoteNodeIdentity: + description: |- + EnableRemoteNodeIdentity enables the remote-node-identity. 
+ Default: true + type: boolean + enableServiceTopology: + description: EnableServiceTopology determine if cilium + should use topology aware hints. + type: boolean + enableTracing: + description: EnableTracing is unused. + type: boolean + enableUnreachableRoutes: + description: |- + EnableUnreachableRoutes enables unreachable routes on pod deletion. + Default: false + type: boolean + enableipv4: + description: EnableIpv4 is unused. + type: boolean + enableipv6: + description: EnableIpv6 is unused. + type: boolean + encryptionType: + description: |- + EncryptionType specifies Cilium Encryption method ("ipsec", "wireguard"). + Default: ipsec + type: string + envoyLog: + description: EnvoyLog is unused. + type: string + etcdManaged: + description: |- + EtcdManagd installs an additional etcd cluster that is used for Cilium state change. + The cluster is operated by cilium-etcd-operator. + Default: false + type: boolean + gatewayAPI: + description: GatewayAPI specifies the configuration for + Cilium Gateway API settings. + properties: + enableSecretsSync: + description: |- + EnableSecretsSync specifies whether synchronization of secrets is enabled. + Default: true + type: boolean + enabled: + description: Enabled specifies whether Cilium Gateway + API is enabled. + type: boolean + type: object + hubble: + description: Hubble configures the Hubble service on the + Cilium agent. + properties: + enabled: + description: Enabled decides if Hubble is enabled + on the agent or not + type: boolean + metrics: + description: |- + Metrics is a list of metrics to collect. If empty or null, metrics are disabled. + See https://docs.cilium.io/en/stable/observability/metrics/#hubble-exported-metrics + items: + type: string + type: array + type: object + identityAllocationMode: + description: |- + IdentityAllocationMode specifies in which backend identities are stored ("crd", "kvstore"). 
+ Default: crd + type: string + identityChangeGracePeriod: + description: |- + IdentityChangeGracePeriod specifies the duration to wait before using a changed identity. + Default: 5s + type: string + ingress: + description: Ingress specifies the configuration for Cilium + Ingress settings. + properties: + defaultLoadBalancerMode: + description: |- + DefaultLoadBalancerMode specifies the default load balancer mode. + Possible values: 'shared' or 'dedicated' + Default: dedicated + type: string + enableSecretsSync: + description: |- + EnableSecretsSync specifies whether synchronization of secrets is enabled. + Default: true + type: boolean + enabled: + description: Enabled specifies whether Cilium Ingress + is enabled. + type: boolean + enforceHttps: + description: |- + EnforceHttps specifies whether HTTPS enforcement is enabled for Ingress traffic. + Default: true + type: boolean + loadBalancerAnnotationPrefixes: + description: |- + LoadBalancerAnnotationPrefixes specifies annotation prefixes for Load Balancer configuration. + Default: "service.beta.kubernetes.io service.kubernetes.io cloud.google.com" + type: string + sharedLoadBalancerServiceName: + description: |- + SharedLoadBalancerServiceName specifies the name of the shared load balancer service. + Default: cilium-ingress + type: string + type: object + ipam: + description: |- + IPAM specifies the IP address allocation mode to use. + Possible values are "crd" and "eni". + "eni" will use AWS native networking for pods. Eni requires masquerade to be set to false. + "crd" will use CRDs for controlling IP address management. + "hostscope" will use hostscope IPAM mode. + "kubernetes" will use addersing based on node pod CIDR. + Default: "kubernetes". + type: string + ipv4ClusterCidrMaskSize: + description: Ipv4ClusterCIDRMaskSize is unused. + type: integer + ipv4Node: + description: Ipv4Node is unused. + type: string + ipv4Range: + description: Ipv4Range is unused. 
+ type: string + ipv4ServiceRange: + description: Ipv4ServiceRange is unused. + type: string + ipv6ClusterAllocCidr: + description: Ipv6ClusterAllocCidr is unused. + type: string + ipv6Node: + description: Ipv6Node is unused. + type: string + ipv6Range: + description: Ipv6Range is unused. + type: string + ipv6ServiceRange: + description: Ipv6ServiceRange is unused. + type: string + k8sApiServer: + description: K8sAPIServer is unused. + type: string + k8sKubeconfigPath: + description: K8sKubeconfigPath is unused. + type: string + keepBpfTemplates: + description: KeepBPFTemplates is unused. + type: boolean + keepConfig: + description: KeepConfig is unused. + type: boolean + labelPrefixFile: + description: LabelPrefixFile is unused. + type: string + labels: + description: Labels is unused. + items: + type: string + type: array + lb: + description: LB is unused. + type: string + libDir: + description: LibDir is unused. + type: string + logDriver: + description: LogDrivers is unused. + items: + type: string + type: array + logOpt: + additionalProperties: + type: string + description: LogOpt is unused. + type: object + logstash: + description: Logstash is unused. + type: boolean + logstashAgent: + description: LogstashAgent is unused. + type: string + logstashProbeTimer: + description: LogstashProbeTimer is unused. + format: int32 + type: integer + memoryRequest: + anyOf: + - type: integer + - type: string + description: 'MemoryRequest memory request of Cilium agent + + operator container. (default: 128Mi)' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + metrics: + description: Metrics is a list of metrics to add or remove + from the default list of metrics the agent exposes. + items: + type: string + type: array + monitorAggregation: + description: |- + MonitorAggregation sets the level of packet monitoring. Possible values are "low", "medium", or "maximum". 
+ Default: medium + type: string + nat46Range: + description: Nat46Range is unused. + type: string + nodeEncryption: + description: |- + NodeEncryption enables encryption for pure node to node traffic. + Default: false + type: boolean + nodeInitBootstrapFile: + description: NodeInitBootstrapFile is unused. + type: string + operatorPodAnnotations: + additionalProperties: + type: string + description: |- + OperatorPodAnnotations makes possible to add additional annotations to cilium operator. + Default: none + type: object + pprof: + description: Pprof is unused. + type: boolean + preallocateBPFMaps: + description: |- + PreallocateBPFMaps reduces the per-packet latency at the expense of up-front memory allocation. + Default: true + type: boolean + prefilterDevice: + description: PrefilterDevice is unused. + type: string + prometheusServeAddr: + description: PrometheusServeAddr is unused. + type: string + reconfigureKubelet: + description: ReconfigureKubelet is unused. + type: boolean + registry: + description: Registry overrides the default Cilium container + registry (quay.io) + type: string + removeCbrBridge: + description: RemoveCbrBridge is unused. + type: boolean + restartPods: + description: RestartPods is unused. + type: boolean + restore: + description: Restore is unused. + type: boolean + sidecarIstioProxyImage: + description: |- + SidecarIstioProxyImage is the regular expression matching compatible Istio sidecar istio-proxy + container image names. + Default: cilium/istio_proxy + type: string + singleClusterRoute: + description: SingleClusterRoute is unused. + type: boolean + socketPath: + description: SocketPath is unused. + type: string + stateDir: + description: StateDir is unused. + type: string + toFqdnsDnsRejectResponseCode: + description: |- + ToFQDNsDNSRejectResponseCode sets the DNS response code for rejecting DNS requests. + Possible values are "nameError" or "refused". 
+ Default: refused + type: string + toFqdnsEnablePoller: + description: |- + ToFQDNsEnablePoller replaces the DNS proxy-based implementation of FQDN policies + with the less powerful legacy implementation. + Default: false + type: boolean + tracePayloadlen: + description: TracePayloadLen is unused. + type: integer + tunnel: + description: |- + Tunnel specifies the Cilium tunnelling mode. Possible values are "vxlan", "geneve", or "disabled". + Default: vxlan + type: string + version: + description: Version is the version of the Cilium agent + and the Cilium Operator. + type: string + type: object type: object kindnet: description: KindnetNetworkingSpec configures Kindnet settings. diff --git a/pkg/apis/kops/cluster.go b/pkg/apis/kops/cluster.go index a92373ed7cd37..3ffaef013df09 100644 --- a/pkg/apis/kops/cluster.go +++ b/pkg/apis/kops/cluster.go @@ -955,7 +955,8 @@ func (c *Cluster) UsesNoneDNS() bool { func (c *Cluster) InstallCNIAssets() bool { return c.Spec.Networking.AmazonVPC == nil && c.Spec.Networking.Calico == nil && - c.Spec.Networking.Cilium == nil + c.Spec.Networking.Cilium == nil && + !c.Spec.Networking.NetworkingIsGCPCilium() } func (c *Cluster) HasImageVolumesSupport() bool { diff --git a/pkg/apis/kops/model/features.go b/pkg/apis/kops/model/features.go index b3397b916021d..11d7626f79387 100644 --- a/pkg/apis/kops/model/features.go +++ b/pkg/apis/kops/model/features.go @@ -54,7 +54,7 @@ func UseKopsControllerForNodeConfig(cluster *kops.Cluster) bool { // UseCiliumEtcd is true if we are using the Cilium etcd cluster. 
func UseCiliumEtcd(cluster *kops.Cluster) bool { - if cluster.Spec.Networking.Cilium == nil { + if cluster.Spec.Networking.Cilium == nil && !cluster.Spec.Networking.NetworkingIsGCPCilium() { return false } diff --git a/pkg/apis/kops/networking.go b/pkg/apis/kops/networking.go index 005c7de74a2c3..264f2c7e0555f 100644 --- a/pkg/apis/kops/networking.go +++ b/pkg/apis/kops/networking.go @@ -103,7 +103,7 @@ func (n *NetworkingSpec) UsesKubenet() bool { } if n.Kubenet != nil { return true - } else if n.GCP != nil { + } else if n.GCP != nil && n.GCP.Cilium == nil { // GCP IP Alias networking is based on kubenet return true } else if n.External != nil { @@ -117,6 +117,11 @@ func (n *NetworkingSpec) UsesKubenet() bool { return false } +// NetworkingIsGCPCilium returns true if our networking is derived from GCP with Cilium +func (n *NetworkingSpec) NetworkingIsGCPCilium() bool { + return n.GCP != nil && n.GCP.Cilium != nil +} + // ClassicNetworkingSpec is the specification of classic networking mode, integrated into kubernetes. // Support been removed since Kubernetes 1.4. type ClassicNetworkingSpec struct{} @@ -584,7 +589,10 @@ type LyftVPCNetworkingSpec struct { } // GCPNetworkingSpec is the specification of GCP's native networking mode, using IP aliases. -type GCPNetworkingSpec struct{} +type GCPNetworkingSpec struct { + // Cilium enables Cilium on GCP. + Cilium *CiliumNetworkingSpec `json:"cilium,omitempty"` +} // KindnetNetworkingSpec configures Kindnet settings. type KindnetNetworkingSpec struct { diff --git a/pkg/apis/kops/v1alpha2/networking.go b/pkg/apis/kops/v1alpha2/networking.go index 7ce2d71a31b7a..0cb12d9892480 100644 --- a/pkg/apis/kops/v1alpha2/networking.go +++ b/pkg/apis/kops/v1alpha2/networking.go @@ -701,7 +701,10 @@ type LyftVPCNetworkingSpec struct { } // GCPNetworkingSpec is the specification of GCP's native networking mode, using IP aliases. -type GCPNetworkingSpec struct{} +type GCPNetworkingSpec struct { + // Cilium enables Cilium on GCP. 
+ Cilium *CiliumNetworkingSpec `json:"cilium,omitempty"` +} // KindnetNetworkingSpec configures Kindnet settings. type KindnetNetworkingSpec struct { diff --git a/pkg/apis/kops/v1alpha2/zz_generated.conversion.go b/pkg/apis/kops/v1alpha2/zz_generated.conversion.go index 762e73d1cabfe..4736107287679 100644 --- a/pkg/apis/kops/v1alpha2/zz_generated.conversion.go +++ b/pkg/apis/kops/v1alpha2/zz_generated.conversion.go @@ -4039,6 +4039,15 @@ func Convert_kops_FlannelNetworkingSpec_To_v1alpha2_FlannelNetworkingSpec(in *ko } func autoConvert_v1alpha2_GCPNetworkingSpec_To_kops_GCPNetworkingSpec(in *GCPNetworkingSpec, out *kops.GCPNetworkingSpec, s conversion.Scope) error { + if in.Cilium != nil { + in, out := &in.Cilium, &out.Cilium + *out = new(kops.CiliumNetworkingSpec) + if err := Convert_v1alpha2_CiliumNetworkingSpec_To_kops_CiliumNetworkingSpec(*in, *out, s); err != nil { + return err + } + } else { + out.Cilium = nil + } return nil } @@ -4048,6 +4057,15 @@ func Convert_v1alpha2_GCPNetworkingSpec_To_kops_GCPNetworkingSpec(in *GCPNetwork } func autoConvert_kops_GCPNetworkingSpec_To_v1alpha2_GCPNetworkingSpec(in *kops.GCPNetworkingSpec, out *GCPNetworkingSpec, s conversion.Scope) error { + if in.Cilium != nil { + in, out := &in.Cilium, &out.Cilium + *out = new(CiliumNetworkingSpec) + if err := Convert_kops_CiliumNetworkingSpec_To_v1alpha2_CiliumNetworkingSpec(*in, *out, s); err != nil { + return err + } + } else { + out.Cilium = nil + } return nil } diff --git a/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go b/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go index 2efef82ed6fcb..4d8bc726c1a60 100644 --- a/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go +++ b/pkg/apis/kops/v1alpha2/zz_generated.deepcopy.go @@ -2254,6 +2254,11 @@ func (in *FlannelNetworkingSpec) DeepCopy() *FlannelNetworkingSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *GCPNetworkingSpec) DeepCopyInto(out *GCPNetworkingSpec) { *out = *in + if in.Cilium != nil { + in, out := &in.Cilium, &out.Cilium + *out = new(CiliumNetworkingSpec) + (*in).DeepCopyInto(*out) + } return } @@ -4982,7 +4987,7 @@ func (in *NetworkingSpec) DeepCopyInto(out *NetworkingSpec) { if in.GCP != nil { in, out := &in.GCP, &out.GCP *out = new(GCPNetworkingSpec) - **out = **in + (*in).DeepCopyInto(*out) } if in.Kindnet != nil { in, out := &in.Kindnet, &out.Kindnet diff --git a/pkg/apis/kops/v1alpha3/networking.go b/pkg/apis/kops/v1alpha3/networking.go index 98e0c07be03f2..809c5151a9378 100644 --- a/pkg/apis/kops/v1alpha3/networking.go +++ b/pkg/apis/kops/v1alpha3/networking.go @@ -526,7 +526,10 @@ type HubbleSpec struct { } // GCPNetworkingSpec is the specification of GCP's native networking mode, using IP aliases. -type GCPNetworkingSpec struct{} +type GCPNetworkingSpec struct { + // Cilium enables Cilium on GCP. + Cilium *CiliumNetworkingSpec `json:"cilium,omitempty"` +} // KindnetNetworkingSpec configures Kindnet settings. 
type KindnetNetworkingSpec struct { diff --git a/pkg/apis/kops/v1alpha3/zz_generated.conversion.go b/pkg/apis/kops/v1alpha3/zz_generated.conversion.go index 1241473a6c7d5..7887ea9277a7c 100644 --- a/pkg/apis/kops/v1alpha3/zz_generated.conversion.go +++ b/pkg/apis/kops/v1alpha3/zz_generated.conversion.go @@ -4358,6 +4358,15 @@ func Convert_kops_GCESpec_To_v1alpha3_GCESpec(in *kops.GCESpec, out *GCESpec, s } func autoConvert_v1alpha3_GCPNetworkingSpec_To_kops_GCPNetworkingSpec(in *GCPNetworkingSpec, out *kops.GCPNetworkingSpec, s conversion.Scope) error { + if in.Cilium != nil { + in, out := &in.Cilium, &out.Cilium + *out = new(kops.CiliumNetworkingSpec) + if err := Convert_v1alpha3_CiliumNetworkingSpec_To_kops_CiliumNetworkingSpec(*in, *out, s); err != nil { + return err + } + } else { + out.Cilium = nil + } return nil } @@ -4367,6 +4376,15 @@ func Convert_v1alpha3_GCPNetworkingSpec_To_kops_GCPNetworkingSpec(in *GCPNetwork } func autoConvert_kops_GCPNetworkingSpec_To_v1alpha3_GCPNetworkingSpec(in *kops.GCPNetworkingSpec, out *GCPNetworkingSpec, s conversion.Scope) error { + if in.Cilium != nil { + in, out := &in.Cilium, &out.Cilium + *out = new(CiliumNetworkingSpec) + if err := Convert_kops_CiliumNetworkingSpec_To_v1alpha3_CiliumNetworkingSpec(*in, *out, s); err != nil { + return err + } + } else { + out.Cilium = nil + } return nil } diff --git a/pkg/apis/kops/v1alpha3/zz_generated.deepcopy.go b/pkg/apis/kops/v1alpha3/zz_generated.deepcopy.go index bcc8c7970b7ae..1aae6a5ebefd7 100644 --- a/pkg/apis/kops/v1alpha3/zz_generated.deepcopy.go +++ b/pkg/apis/kops/v1alpha3/zz_generated.deepcopy.go @@ -2206,6 +2206,11 @@ func (in *GCESpec) DeepCopy() *GCESpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *GCPNetworkingSpec) DeepCopyInto(out *GCPNetworkingSpec) { *out = *in + if in.Cilium != nil { + in, out := &in.Cilium, &out.Cilium + *out = new(CiliumNetworkingSpec) + (*in).DeepCopyInto(*out) + } return } @@ -4938,7 +4943,7 @@ func (in *NetworkingSpec) DeepCopyInto(out *NetworkingSpec) { if in.GCP != nil { in, out := &in.GCP, &out.GCP *out = new(GCPNetworkingSpec) - **out = **in + (*in).DeepCopyInto(*out) } if in.Kindnet != nil { in, out := &in.Kindnet, &out.Kindnet diff --git a/pkg/apis/kops/zz_generated.deepcopy.go b/pkg/apis/kops/zz_generated.deepcopy.go index fd602a22a2d0a..5e82381775d78 100644 --- a/pkg/apis/kops/zz_generated.deepcopy.go +++ b/pkg/apis/kops/zz_generated.deepcopy.go @@ -2369,6 +2369,11 @@ func (in *GCESpec) DeepCopy() *GCESpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GCPNetworkingSpec) DeepCopyInto(out *GCPNetworkingSpec) { *out = *in + if in.Cilium != nil { + in, out := &in.Cilium, &out.Cilium + *out = new(CiliumNetworkingSpec) + (*in).DeepCopyInto(*out) + } return } @@ -5156,7 +5161,7 @@ func (in *NetworkingSpec) DeepCopyInto(out *NetworkingSpec) { if in.GCP != nil { in, out := &in.GCP, &out.GCP *out = new(GCPNetworkingSpec) - **out = **in + (*in).DeepCopyInto(*out) } if in.Kindnet != nil { in, out := &in.Kindnet, &out.Kindnet diff --git a/pkg/apis/nodeup/config.go b/pkg/apis/nodeup/config.go index cf01203240d90..f7da9b7c04905 100644 --- a/pkg/apis/nodeup/config.go +++ b/pkg/apis/nodeup/config.go @@ -345,6 +345,16 @@ func NewConfig(cluster *kops.Cluster, instanceGroup *kops.InstanceGroup) (*Confi } } + if cluster.Spec.Networking.GCP != nil { + config.Networking.GCP = &kops.GCPNetworkingSpec{} + if cluster.Spec.Networking.GCP.Cilium != nil { + config.Networking.GCP.Cilium = cluster.Spec.Networking.GCP.Cilium + if 
model.UseCiliumEtcd(cluster) { + config.UseCiliumEtcd = true + } + } + } + if cluster.Spec.Networking.CNI != nil && cluster.Spec.Networking.CNI.UsesSecondaryIP { config.Networking.CNI = &kops.CNINetworkingSpec{UsesSecondaryIP: true} } diff --git a/pkg/model/components/cilium.go b/pkg/model/components/cilium.go index afa894a22c5f9..df63e0adcef64 100644 --- a/pkg/model/components/cilium.go +++ b/pkg/model/components/cilium.go @@ -35,6 +35,9 @@ var _ loader.ClusterOptionsBuilder = &CiliumOptionsBuilder{} func (b *CiliumOptionsBuilder) BuildOptions(o *kops.Cluster) error { clusterSpec := &o.Spec c := clusterSpec.Networking.Cilium + if c == nil && clusterSpec.Networking.GCP != nil { + c = clusterSpec.Networking.GCP.Cilium + } if c == nil { return nil } diff --git a/pkg/model/context.go b/pkg/model/context.go index aade9c9e055eb..e025231631c3b 100644 --- a/pkg/model/context.go +++ b/pkg/model/context.go @@ -418,3 +418,7 @@ func (b *KopsModelContext) NetworkingIsCalico() bool { func (b *KopsModelContext) NetworkingIsCilium() bool { return b.Cluster.Spec.Networking.Cilium != nil } + +func (b *KopsModelContext) NetworkingIsGCPWithCilium() bool { + return b.Cluster.Spec.Networking.GCP != nil && b.Cluster.Spec.Networking.GCP.Cilium != nil +} diff --git a/pkg/model/gcemodel/firewall.go b/pkg/model/gcemodel/firewall.go index fac972eb1f0bf..632b88d51362c 100644 --- a/pkg/model/gcemodel/firewall.go +++ b/pkg/model/gcemodel/firewall.go @@ -148,7 +148,7 @@ func (b *FirewallModelBuilder) Build(c *fi.CloudupModelBuilderContext) error { if b.NetworkingIsCalico() { t.Allowed = append(t.Allowed, "ipip") } - if b.NetworkingIsCilium() { + if b.NetworkingIsCilium() || b.NetworkingIsGCPWithCilium() { t.Allowed = append(t.Allowed, fmt.Sprintf("udp:%d", wellknownports.VxlanUDP)) if model.UseCiliumEtcd(b.Cluster) { t.Allowed = append(t.Allowed, fmt.Sprintf("tcp:%d", wellknownports.EtcdCiliumClientPort)) diff --git a/tests/integration/create_cluster/cilium-gce/expected-v1alpha2.yaml 
b/tests/integration/create_cluster/cilium-gce/expected-v1alpha2.yaml new file mode 100644 index 0000000000000..e768be33183e6 --- /dev/null +++ b/tests/integration/create_cluster/cilium-gce/expected-v1alpha2.yaml @@ -0,0 +1,102 @@ +apiVersion: kops.k8s.io/v1alpha2 +kind: Cluster +metadata: + creationTimestamp: "2017-01-01T00:00:00Z" + name: minimal.example.com +spec: + api: + loadBalancer: + type: Public + authorization: + rbac: {} + channel: stable + cloudConfig: {} + cloudProvider: gce + configBase: memfs://tests/minimal.example.com + etcdClusters: + - cpuRequest: 200m + etcdMembers: + - instanceGroup: control-plane-us-test1-a + name: a + manager: + backupRetentionDays: 90 + memoryRequest: 100Mi + name: main + - cpuRequest: 100m + etcdMembers: + - instanceGroup: control-plane-us-test1-a + name: a + manager: + backupRetentionDays: 90 + memoryRequest: 100Mi + name: events + iam: + allowContainerRegistry: true + legacy: false + kubeProxy: + enabled: false + kubelet: + anonymousAuth: false + kubernetesApiAccess: + - 0.0.0.0/0 + - ::/0 + kubernetesVersion: v1.32.0 + networking: + gce: + cilium: + enableNodePort: true + nonMasqueradeCIDR: 10.0.0.0/8 + podCIDR: 10.4.0.0/14 + project: testproject + serviceClusterIPRange: 10.1.0.0/16 + sshAccess: + - 0.0.0.0/0 + - ::/0 + subnets: + - cidr: 10.0.32.0/19 + name: us-test1 + region: us-test1 + type: Public + topology: + dns: + type: None + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2017-01-01T00:00:00Z" + labels: + kops.k8s.io/cluster: minimal.example.com + name: control-plane-us-test1-a +spec: + image: ubuntu-os-cloud/ubuntu-2404-noble-amd64-v20250606 + machineType: e2-medium + maxSize: 1 + minSize: 1 + role: Master + subnets: + - us-test1 + zones: + - us-test1-a + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2017-01-01T00:00:00Z" + labels: + kops.k8s.io/cluster: minimal.example.com + name: nodes-us-test1-a +spec: + image: 
ubuntu-os-cloud/ubuntu-2404-noble-amd64-v20250606 + machineType: e2-medium + maxSize: 1 + minSize: 1 + role: Node + subnets: + - us-test1 + zones: + - us-test1-a diff --git a/tests/integration/create_cluster/cilium-gce/options.yaml b/tests/integration/create_cluster/cilium-gce/options.yaml new file mode 100644 index 0000000000000..e5d58b0d28f72 --- /dev/null +++ b/tests/integration/create_cluster/cilium-gce/options.yaml @@ -0,0 +1,7 @@ +ClusterName: minimal.example.com +CloudProvider: gce +Networking: gcp-with-cilium +Project: testproject +KubernetesVersion: v1.32.0 +Zones: +- us-test1-a \ No newline at end of file diff --git a/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_cilium-gce.example.com-addons-bootstrap_content b/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_cilium-gce.example.com-addons-bootstrap_content new file mode 100644 index 0000000000000..c68197719ca07 --- /dev/null +++ b/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_cilium-gce.example.com-addons-bootstrap_content @@ -0,0 +1,108 @@ +kind: Addons +metadata: + name: bootstrap +spec: + addons: + - id: k8s-1.16 + manifest: kops-controller.addons.k8s.io/k8s-1.16.yaml + manifestHash: 0bab1697c9348ba641bb4d7b1bfbe3a2e8a1d56a2571c2d1d4a669d8b33db467 + name: kops-controller.addons.k8s.io + needsRollingUpdate: control-plane + selector: + k8s-addon: kops-controller.addons.k8s.io + version: 9.99.0 + - id: k8s-1.12 + manifest: coredns.addons.k8s.io/k8s-1.12.yaml + manifestHash: 2a78f55817919201e8a5896bbe8d8ab93798cd06991202e4cfc5f29a95b40fce + name: coredns.addons.k8s.io + selector: + k8s-addon: coredns.addons.k8s.io + version: 9.99.0 + - id: k8s-1.9 + manifest: kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml + manifestHash: da91eb5cf9a29f1b03510007d6d54603aef2fc23a305abc9ba496c510dfd3bc7 + name: kubelet-api.rbac.addons.k8s.io + selector: + k8s-addon: kubelet-api.rbac.addons.k8s.io + version: 9.99.0 + - manifest: limit-range.addons.k8s.io/v1.5.0.yaml + 
manifestHash: 686cc69e559a1c6f5e8b94e38de54a575a25c432ed5ceec565244b965fb5f07f + name: limit-range.addons.k8s.io + selector: + k8s-addon: limit-range.addons.k8s.io + version: 9.99.0 + - id: k8s-1.12 + manifest: dns-controller.addons.k8s.io/k8s-1.12.yaml + manifestHash: 5c4088f2427f70a8ad8b6d574479b57f7764bd560a62aa18a1b1efad240e30a7 + name: dns-controller.addons.k8s.io + selector: + k8s-addon: dns-controller.addons.k8s.io + version: 9.99.0 + - id: v1.7.0 + manifest: storage-gce.addons.k8s.io/v1.7.0.yaml + manifestHash: 6e9ece81d6a7890599d0ce7e56c4f415863a1b64b69849a119c96d1c47670620 + name: storage-gce.addons.k8s.io + selector: + k8s-addon: storage-gce.addons.k8s.io + version: 9.99.0 + - id: k8s-1.23 + manifest: gcp-cloud-controller.addons.k8s.io/k8s-1.23.yaml + manifestHash: 2dc57d81c23536dff2d540caf6d1076df47d98818ab6b1693b25dd62ea03652a + name: gcp-cloud-controller.addons.k8s.io + prune: + kinds: + - kind: ConfigMap + labelSelector: addon.kops.k8s.io/name=gcp-cloud-controller.addons.k8s.io,app.kubernetes.io/managed-by=kops + - kind: Service + labelSelector: addon.kops.k8s.io/name=gcp-cloud-controller.addons.k8s.io,app.kubernetes.io/managed-by=kops + - kind: ServiceAccount + labelSelector: addon.kops.k8s.io/name=gcp-cloud-controller.addons.k8s.io,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system + - group: admissionregistration.k8s.io + kind: MutatingWebhookConfiguration + labelSelector: addon.kops.k8s.io/name=gcp-cloud-controller.addons.k8s.io,app.kubernetes.io/managed-by=kops + - group: admissionregistration.k8s.io + kind: ValidatingWebhookConfiguration + labelSelector: addon.kops.k8s.io/name=gcp-cloud-controller.addons.k8s.io,app.kubernetes.io/managed-by=kops + - group: apps + kind: DaemonSet + labelSelector: addon.kops.k8s.io/name=gcp-cloud-controller.addons.k8s.io,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system + - group: apps + kind: Deployment + labelSelector: 
addon.kops.k8s.io/name=gcp-cloud-controller.addons.k8s.io,app.kubernetes.io/managed-by=kops + - group: apps + kind: StatefulSet + labelSelector: addon.kops.k8s.io/name=gcp-cloud-controller.addons.k8s.io,app.kubernetes.io/managed-by=kops + - group: policy + kind: PodDisruptionBudget + labelSelector: addon.kops.k8s.io/name=gcp-cloud-controller.addons.k8s.io,app.kubernetes.io/managed-by=kops + - group: rbac.authorization.k8s.io + kind: ClusterRole + labelSelector: addon.kops.k8s.io/name=gcp-cloud-controller.addons.k8s.io,app.kubernetes.io/managed-by=kops + - group: rbac.authorization.k8s.io + kind: ClusterRoleBinding + labelSelector: addon.kops.k8s.io/name=gcp-cloud-controller.addons.k8s.io,app.kubernetes.io/managed-by=kops + - group: rbac.authorization.k8s.io + kind: Role + labelSelector: addon.kops.k8s.io/name=gcp-cloud-controller.addons.k8s.io,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system + - group: rbac.authorization.k8s.io + kind: RoleBinding + labelSelector: addon.kops.k8s.io/name=gcp-cloud-controller.addons.k8s.io,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system + selector: + k8s-addon: gcp-cloud-controller.addons.k8s.io + version: 9.99.0 + - id: k8s-1.16 + manifest: networking.cilium.io/k8s-1.16-v1.15.yaml + manifestHash: 6c5155acf3eb36610a5e4ec305adf587fd5828cf2ebc49c819d4498c39411c65 + name: networking.cilium.io + needsRollingUpdate: all + selector: + role.kubernetes.io/networking: "1" + version: 9.99.0 diff --git a/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_cilium-gce.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content b/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_cilium-gce.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content new file mode 100644 index 0000000000000..2e775d02dc0ff --- /dev/null +++ b/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_cilium-gce.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content @@ -0,0 +1,373 @@ +apiVersion: v1 +kind: 
ServiceAccount +metadata: + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + kubernetes.io/cluster-service: "true" + name: coredns + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + kubernetes.io/bootstrapping: rbac-defaults + name: system:coredns +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - pods + - namespaces + verbs: + - list + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + kubernetes.io/bootstrapping: rbac-defaults + name: system:coredns +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:coredns +subjects: +- kind: ServiceAccount + name: coredns + namespace: kube-system + +--- + +apiVersion: v1 +data: + Corefile: |- + .:53 { + errors + health { + lameduck 10s + } + ready + kubernetes cluster.local. in-addr.arpa ip6.arpa { + pods insecure + fallthrough in-addr.arpa ip6.arpa + ttl 30 + } + prometheus :9153 + forward . 
/etc/resolv.conf { + max_concurrent 1000 + } + cache 30 + loop + reload + loadbalance + } +kind: ConfigMap +metadata: + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + addonmanager.kubernetes.io/mode: EnsureExists + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + kubernetes.io/name: CoreDNS + name: coredns + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-dns + strategy: + rollingUpdate: + maxSurge: 10% + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + k8s-app: kube-dns + kops.k8s.io/managed-by: kops + spec: + containers: + - args: + - -conf + - /etc/coredns/Corefile + image: registry.k8s.io/coredns/coredns:v1.13.2 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 5 + httpGet: + path: /health + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + successThreshold: 1 + timeoutSeconds: 5 + name: coredns + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + - containerPort: 9153 + name: metrics + protocol: TCP + readinessProbe: + failureThreshold: 1 + httpGet: + path: /ready + port: 8181 + scheme: HTTP + periodSeconds: 5 + timeoutSeconds: 5 + resources: + limits: + memory: 170Mi + requests: + cpu: 100m + memory: 70Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - all + readOnlyRootFilesystem: true + volumeMounts: + - mountPath: /etc/coredns + name: config-volume + readOnly: true + dnsPolicy: Default + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + serviceAccountName: coredns + tolerations: + - key: CriticalAddonsOnly + 
operator: Exists + topologySpreadConstraints: + - labelSelector: + matchLabels: + k8s-app: kube-dns + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + - labelSelector: + matchLabels: + k8s-app: kube-dns + maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + volumes: + - configMap: + name: coredns + name: config-volume + +--- + +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/port: "9153" + prometheus.io/scrape: "true" + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + kubernetes.io/name: CoreDNS + name: kube-dns + namespace: kube-system + resourceVersion: "0" +spec: + clusterIP: 10.1.0.10 + ports: + - name: dns + port: 53 + protocol: UDP + - name: dns-tcp + port: 53 + protocol: TCP + - name: metrics + port: 9153 + protocol: TCP + selector: + k8s-app: kube-dns + +--- + +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: kube-dns + namespace: kube-system +spec: + maxUnavailable: 50% + selector: + matchLabels: + k8s-app: kube-dns + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns-autoscaler + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns-autoscaler +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - replicationcontrollers/scale + verbs: + - get + - update +- apiGroups: + - 
extensions + - apps + resources: + - deployments/scale + - replicasets/scale + verbs: + - get + - update +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - create + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns-autoscaler +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: coredns-autoscaler +subjects: +- kind: ServiceAccount + name: coredns-autoscaler + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + k8s-app: coredns-autoscaler + kubernetes.io/cluster-service: "true" + name: coredns-autoscaler + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: coredns-autoscaler + template: + metadata: + labels: + k8s-app: coredns-autoscaler + kops.k8s.io/managed-by: kops + spec: + containers: + - command: + - /cluster-proportional-autoscaler + - --namespace=kube-system + - --configmap=coredns-autoscaler + - --target=Deployment/coredns + - --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}} + - --logtostderr=true + - --v=2 + image: registry.k8s.io/cpa/cluster-proportional-autoscaler:v1.9.0 + name: autoscaler + resources: + requests: + cpu: 20m + memory: 10Mi + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + serviceAccountName: coredns-autoscaler + tolerations: + - key: CriticalAddonsOnly + operator: Exists diff --git a/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_cilium-gce.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content 
b/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_cilium-gce.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content new file mode 100644 index 0000000000000..ecf8cd0f34f7d --- /dev/null +++ b/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_cilium-gce.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content @@ -0,0 +1,133 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + k8s-app: dns-controller + version: v1.34.0-beta.1 + name: dns-controller + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: dns-controller + strategy: + type: Recreate + template: + metadata: + labels: + k8s-addon: dns-controller.addons.k8s.io + k8s-app: dns-controller + kops.k8s.io/managed-by: kops + version: v1.34.0-beta.1 + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + containers: + - args: + - --watch-ingress=false + - --dns=google-clouddns + - --zone=*/1 + - --internal-ipv4 + - --zone=*/* + - -v=2 + command: null + env: + - name: KUBERNETES_SERVICE_HOST + value: 127.0.0.1 + image: registry.k8s.io/kops/dns-controller:1.34.0-beta.1 + name: dns-controller + resources: + requests: + cpu: 50m + memory: 50Mi + securityContext: + runAsNonRoot: true + dnsPolicy: Default + hostNetwork: true + nodeSelector: null + priorityClassName: system-cluster-critical + serviceAccount: dns-controller + tolerations: + - key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - key: node.kubernetes.io/not-ready + operator: Exists + - key: node-role.kubernetes.io/control-plane + operator: Exists + - key: node-role.kubernetes.io/master + operator: 
Exists + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + name: dns-controller + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + name: kops:dns-controller +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - pods + - ingress + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + name: kops:dns-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kops:dns-controller +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:serviceaccount:kube-system:dns-controller diff --git a/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_cilium-gce.example.com-addons-gcp-cloud-controller.addons.k8s.io-k8s-1.23_content b/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_cilium-gce.example.com-addons-gcp-cloud-controller.addons.k8s.io-k8s-1.23_content new file mode 100644 index 0000000000000..961e5fb5a65c0 --- /dev/null +++ b/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_cilium-gce.example.com-addons-gcp-cloud-controller.addons.k8s.io-k8s-1.23_content @@ -0,0 +1,550 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + addon.kops.k8s.io/name: gcp-cloud-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + component: cloud-controller-manager + k8s-addon: 
gcp-cloud-controller.addons.k8s.io + name: cloud-controller-manager + namespace: kube-system +spec: + selector: + matchLabels: + component: cloud-controller-manager + template: + metadata: + labels: + component: cloud-controller-manager + kops.k8s.io/managed-by: kops + tier: control-plane + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + containers: + - args: + - --allocate-node-cidrs=true + - --cidr-allocator-type=CloudAllocator + - --cluster-cidr=10.4.0.0/14 + - --cluster-name=cilium-gce-example-com + - --configure-cloud-routes=false + - --controllers=* + - --leader-elect=true + - --v=2 + - --cloud-provider=gce + - --use-service-account-credentials=true + - --cloud-config=/etc/kubernetes/cloud.config + env: + - name: KUBERNETES_SERVICE_HOST + value: 127.0.0.1 + image: registry.k8s.io/cloud-provider-gcp/cloud-controller-manager:v35.0.0 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 3 + httpGet: + host: 127.0.0.1 + path: /healthz + port: 10258 + scheme: HTTPS + initialDelaySeconds: 15 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 15 + name: cloud-controller-manager + resources: + requests: + cpu: 200m + volumeMounts: + - mountPath: /etc/kubernetes/cloud.config + name: cloudconfig + readOnly: true + hostNetwork: true + nodeSelector: null + priorityClassName: system-cluster-critical + serviceAccountName: cloud-controller-manager + tolerations: + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + - effect: NoSchedule + key: node.kubernetes.io/not-ready + - effect: NoSchedule + key: node-role.kubernetes.io/master + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + volumes: + - hostPath: + path: /etc/kubernetes/cloud.config + type: "" + name: 
cloudconfig + updateStrategy: + type: RollingUpdate + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + addon.kops.k8s.io/name: gcp-cloud-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: gcp-cloud-controller.addons.k8s.io + name: cloud-controller-manager + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + addon.kops.k8s.io/name: gcp-cloud-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: gcp-cloud-controller.addons.k8s.io + name: cloud-controller-manager:apiserver-authentication-reader + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: +- apiGroup: "" + kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + addon.kops.k8s.io/name: gcp-cloud-controller.addons.k8s.io + addonmanager.kubernetes.io/mode: Reconcile + app.kubernetes.io/managed-by: kops + k8s-addon: gcp-cloud-controller.addons.k8s.io + name: system:cloud-controller-manager +rules: +- apiGroups: + - "" + - events.k8s.io + resources: + - events + verbs: + - create + - patch + - update +- apiGroups: + - "" + resources: + - services/status + verbs: + - patch + - update +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - list + - watch + - update +- apiGroups: + - coordination.k8s.io + resourceNames: + - cloud-controller-manager + resources: + - leases + verbs: + - get + - update +- apiGroups: + - "" + resources: + - endpoints + - serviceaccounts + verbs: + - create + - get + - update +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - update + - patch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get +- apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - update +- apiGroups: + - 
"" + resources: + - secrets + verbs: + - create + - delete + - get + - update +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - '*' + resources: + - '*' + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - serviceaccounts/token + verbs: + - create + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + addon.kops.k8s.io/name: gcp-cloud-controller.addons.k8s.io + addonmanager.kubernetes.io/mode: Reconcile + app.kubernetes.io/managed-by: kops + k8s-addon: gcp-cloud-controller.addons.k8s.io + name: system::leader-locking-cloud-controller-manager + namespace: kube-system +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - watch +- apiGroups: + - "" + resourceNames: + - cloud-controller-manager + resources: + - configmaps + verbs: + - create + - patch + - get + - update + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + addon.kops.k8s.io/name: gcp-cloud-controller.addons.k8s.io + addonmanager.kubernetes.io/mode: Reconcile + app.kubernetes.io/managed-by: kops + k8s-addon: gcp-cloud-controller.addons.k8s.io + name: system:controller:cloud-node-controller +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - update + - delete + - patch +- apiGroups: + - "" + resources: + - nodes/status + verbs: + - get + - list + - update + - delete + - patch +- apiGroups: + - "" + resources: + - pods + verbs: + - list + - delete +- apiGroups: + - "" + resources: + - pods/status + verbs: + - list + - delete + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + addon.kops.k8s.io/name: gcp-cloud-controller.addons.k8s.io + addonmanager.kubernetes.io/mode: Reconcile + app.kubernetes.io/managed-by: kops + k8s-addon: gcp-cloud-controller.addons.k8s.io + name: 
system::leader-locking-cloud-controller-manager + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: system::leader-locking-cloud-controller-manager +subjects: +- kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + addon.kops.k8s.io/name: gcp-cloud-controller.addons.k8s.io + addonmanager.kubernetes.io/mode: Reconcile + app.kubernetes.io/managed-by: kops + k8s-addon: gcp-cloud-controller.addons.k8s.io + name: system:cloud-controller-manager +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager +subjects: +- kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + addon.kops.k8s.io/name: gcp-cloud-controller.addons.k8s.io + addonmanager.kubernetes.io/mode: Reconcile + app.kubernetes.io/managed-by: kops + k8s-addon: gcp-cloud-controller.addons.k8s.io + name: system:controller:cloud-node-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:controller:cloud-node-controller +subjects: +- kind: ServiceAccount + name: cloud-node-controller + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + addon.kops.k8s.io/name: gcp-cloud-controller.addons.k8s.io + addonmanager.kubernetes.io/mode: Reconcile + app.kubernetes.io/managed-by: kops + k8s-addon: gcp-cloud-controller.addons.k8s.io + name: system:controller:pvl-controller +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update +- apiGroups: + - "" + resources: + - persistentvolumeclaims + - persistentvolumes + verbs: + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + addon.kops.k8s.io/name: 
gcp-cloud-controller.addons.k8s.io + addonmanager.kubernetes.io/mode: Reconcile + app.kubernetes.io/managed-by: kops + k8s-addon: gcp-cloud-controller.addons.k8s.io + name: gce:cloud-provider + namespace: kube-system +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - get + - patch + - update + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + addon.kops.k8s.io/name: gcp-cloud-controller.addons.k8s.io + addonmanager.kubernetes.io/mode: Reconcile + app.kubernetes.io/managed-by: kops + k8s-addon: gcp-cloud-controller.addons.k8s.io + name: gce:cloud-provider +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update +- apiGroups: + - "" + resources: + - services/status + verbs: + - patch + - update + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: + kubernetes.io/deprecation: cloud-provider role is DEPRECATED in the concern of + potential collisions and will be removed in 1.16. Do not use this role. + labels: + addon.kops.k8s.io/name: gcp-cloud-controller.addons.k8s.io + addonmanager.kubernetes.io/mode: Reconcile + app.kubernetes.io/managed-by: kops + k8s-addon: gcp-cloud-controller.addons.k8s.io + name: cloud-provider + namespace: kube-system +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - get + - patch + - update + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + kubernetes.io/deprecation: cloud-provider clusterrole is DEPRECATED in the concern + of potential collisions and will be removed in 1.16. Do not use this role. 
+ labels: + addon.kops.k8s.io/name: gcp-cloud-controller.addons.k8s.io + addonmanager.kubernetes.io/mode: Reconcile + app.kubernetes.io/managed-by: kops + k8s-addon: gcp-cloud-controller.addons.k8s.io + name: cloud-provider +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + addon.kops.k8s.io/name: gcp-cloud-controller.addons.k8s.io + addonmanager.kubernetes.io/mode: Reconcile + app.kubernetes.io/managed-by: kops + k8s-addon: gcp-cloud-controller.addons.k8s.io + name: gce:cloud-provider + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: gce:cloud-provider +subjects: +- kind: ServiceAccount + name: cloud-provider + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + addon.kops.k8s.io/name: gcp-cloud-controller.addons.k8s.io + addonmanager.kubernetes.io/mode: Reconcile + app.kubernetes.io/managed-by: kops + k8s-addon: gcp-cloud-controller.addons.k8s.io + name: gce:cloud-provider +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: gce:cloud-provider +subjects: +- kind: ServiceAccount + name: cloud-provider + namespace: kube-system diff --git a/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_cilium-gce.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content b/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_cilium-gce.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content new file mode 100644 index 0000000000000..fa291ec8922c7 --- /dev/null +++ b/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_cilium-gce.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content @@ -0,0 +1,219 @@ +apiVersion: v1 +data: + config.yaml: | + 
{"clusterName":"cilium-gce.example.com","cloud":"gce","configBase":"memfs://tests/cilium-gce.example.com","secretStore":"memfs://tests/cilium-gce.example.com/secrets","server":{"Listen":":3988","provider":{"gce":{"projectID":"testproject","region":"us-test1","clusterName":"cilium-gce.example.com","MaxTimeSkew":300}},"serverKeyPath":"/etc/kubernetes/kops-controller/pki/kops-controller.key","serverCertificatePath":"/etc/kubernetes/kops-controller/pki/kops-controller.crt","caBasePath":"/etc/kubernetes/kops-controller/pki","signingCAs":["kubernetes-ca"],"certNames":["kubelet","kubelet-server","kube-proxy"]}} +kind: ConfigMap +metadata: + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + k8s-app: kops-controller + version: v1.34.0-beta.1 + name: kops-controller + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kops-controller + template: + metadata: + annotations: + dns.alpha.kubernetes.io/internal: kops-controller.internal.cilium-gce.example.com + labels: + k8s-addon: kops-controller.addons.k8s.io + k8s-app: kops-controller + kops.k8s.io/managed-by: kops + version: v1.34.0-beta.1 + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - key: kops.k8s.io/kops-controller-pki + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + - key: kops.k8s.io/kops-controller-pki + operator: Exists + containers: + - args: + - --v=2 + - --conf=/etc/kubernetes/kops-controller/config/config.yaml + command: null + env: + - name: 
KUBERNETES_SERVICE_HOST + value: 127.0.0.1 + - name: KOPS_RUN_TOO_NEW_VERSION + value: "1" + image: registry.k8s.io/kops/kops-controller:1.34.0-beta.1 + name: kops-controller + resources: + requests: + cpu: 50m + memory: 50Mi + securityContext: + runAsNonRoot: true + runAsUser: 10011 + volumeMounts: + - mountPath: /etc/kubernetes/kops-controller/config/ + name: kops-controller-config + - mountPath: /etc/kubernetes/kops-controller/pki/ + name: kops-controller-pki + dnsPolicy: Default + hostNetwork: true + nodeSelector: null + priorityClassName: system-cluster-critical + serviceAccount: kops-controller + tolerations: + - key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - key: node.kubernetes.io/not-ready + operator: Exists + - key: node-role.kubernetes.io/master + operator: Exists + - key: node-role.kubernetes.io/control-plane + operator: Exists + volumes: + - configMap: + name: kops-controller + name: kops-controller-config + - hostPath: + path: /etc/kubernetes/kops-controller/ + type: Directory + name: kops-controller-pki + updateStrategy: + type: OnDelete + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - patch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller +roleRef: + apiGroup: 
rbac.authorization.k8s.io + kind: ClusterRole + name: kops-controller +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:serviceaccount:kube-system:kops-controller + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + - create +- apiGroups: + - "" + - coordination.k8s.io + resourceNames: + - kops-controller-leader + resources: + - configmaps + - leases + verbs: + - get + - list + - watch + - patch + - update + - delete +- apiGroups: + - "" + - coordination.k8s.io + resources: + - configmaps + - leases + verbs: + - create + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kops-controller +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:serviceaccount:kube-system:kops-controller diff --git a/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_cilium-gce.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content b/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_cilium-gce.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content new file mode 100644 index 0000000000000..bb4d69ef0ef88 --- /dev/null +++ b/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_cilium-gce.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + addon.kops.k8s.io/name: 
kubelet-api.rbac.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kubelet-api.rbac.addons.k8s.io + name: kops:system:kubelet-api-admin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:kubelet-api-admin +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: kubelet-api diff --git a/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_cilium-gce.example.com-addons-limit-range.addons.k8s.io_content b/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_cilium-gce.example.com-addons-limit-range.addons.k8s.io_content new file mode 100644 index 0000000000000..c8939ddffd3ad --- /dev/null +++ b/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_cilium-gce.example.com-addons-limit-range.addons.k8s.io_content @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: LimitRange +metadata: + labels: + addon.kops.k8s.io/name: limit-range.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: limit-range.addons.k8s.io + name: limits + namespace: default +spec: + limits: + - defaultRequest: + cpu: 100m + type: Container diff --git a/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_cilium-gce.example.com-addons-networking.cilium.io-k8s-1.16_content b/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_cilium-gce.example.com-addons-networking.cilium.io-k8s-1.16_content new file mode 100644 index 0000000000000..a29db3c85bd3c --- /dev/null +++ b/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_cilium-gce.example.com-addons-networking.cilium.io-k8s-1.16_content @@ -0,0 +1,1184 @@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: cilium-operator + app.kubernetes.io/part-of: cilium + io.cilium/app: operator + name: cilium-operator + role.kubernetes.io/networking: "1" + name: cilium-operator + namespace: kube-system +spec: + 
maxUnavailable: 1 + selector: + matchLabels: + io.cilium/app: operator + name: cilium-operator + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: cilium + namespace: kube-system + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: cilium-operator + namespace: kube-system + +--- + +apiVersion: v1 +data: + agent-not-ready-taint-key: node.cilium.io/agent-not-ready + auto-direct-node-routes: "false" + bpf-distributed-lru: "false" + bpf-events-drop-enabled: "true" + bpf-events-policy-verdict-enabled: "true" + bpf-events-trace-enabled: "true" + bpf-lb-acceleration: disabled + bpf-lb-algorithm-annotation: "false" + bpf-lb-external-clusterip: "false" + bpf-lb-map-max: "65536" + bpf-lb-mode-annotation: "false" + bpf-lb-sock: "false" + bpf-lb-source-range-all-types: "false" + bpf-map-dynamic-size-ratio: "0.0025" + bpf-policy-map-max: "16384" + bpf-policy-stats-map-max: "65536" + bpf-root: /sys/fs/bpf + cgroup-root: /run/cilium/cgroupv2 + cilium-endpoint-gc-interval: 5m0s + cluster-name: default + cluster-pool-ipv4-mask-size: "24" + clustermesh-enable-endpoint-sync: "false" + clustermesh-enable-mcs-api: "false" + cni-exclusive: "true" + cni-log-file: /var/run/cilium/cilium-cni.log + custom-cni-conf: "false" + datapath-mode: veth + debug: "false" + debug-verbose: "" + default-lb-service-ipam: lbipam + direct-routing-skip-unreachable: "false" + disable-endpoint-crd: "false" + dnsproxy-enable-transparent-mode: "true" + dnsproxy-socket-linger-timeout: "10" + egress-gateway-reconciliation-trigger-interval: 1s + enable-auto-protect-node-port-range: "true" + enable-bpf-clock-probe: "false" + enable-bpf-masquerade: "false" + enable-endpoint-health-checking: "true" + 
enable-endpoint-lockdown-on-policy-overflow: "false" + enable-health-check-loadbalancer-ip: "false" + enable-health-check-nodeport: "true" + enable-health-checking: "true" + enable-internal-traffic-policy: "true" + enable-ipv4: "true" + enable-ipv4-big-tcp: "false" + enable-ipv4-masquerade: "true" + enable-ipv6: "false" + enable-ipv6-big-tcp: "false" + enable-ipv6-masquerade: "false" + enable-k8s-networkpolicy: "true" + enable-l2-neigh-discovery: "false" + enable-l7-proxy: "true" + enable-lb-ipam: "true" + enable-local-redirect-policy: "false" + enable-masquerade-to-route-source: "false" + enable-node-port: "false" + enable-node-selector-labels: "false" + enable-non-default-deny-policies: "true" + enable-policy: default + enable-sctp: "false" + enable-service-topology: "false" + enable-source-ip-verification: "true" + enable-svc-source-range-check: "true" + enable-tcx: "true" + enable-vtep: "false" + enable-well-known-identities: "false" + enable-xt-socket-fallback: "true" + envoy-access-log-buffer-size: "4096" + envoy-base-id: "0" + envoy-keep-cap-netbindservice: "false" + external-envoy-proxy: "false" + health-check-icmp-failure-threshold: "3" + http-retry-count: "3" + http-stream-idle-timeout: "300" + identity-allocation-mode: crd + identity-change-grace-period: 5s + identity-gc-interval: 15m0s + identity-heartbeat-timeout: 30m0s + identity-management-mode: agent + install-no-conntrack-iptables-rules: "false" + ipam: kubernetes + ipam-cilium-node-update-rate: 15s + iptables-random-fully: "false" + k8s-require-ipv4-pod-cidr: "false" + k8s-require-ipv6-pod-cidr: "false" + kube-proxy-replacement: "false" + max-connected-clusters: "255" + mesh-auth-enabled: "true" + mesh-auth-gc-interval: 5m0s + mesh-auth-queue-size: "1024" + mesh-auth-rotated-identities-queue-size: "1024" + metrics-sampling-interval: 5m + monitor-aggregation: medium + monitor-aggregation-flags: all + monitor-aggregation-interval: 5s + nat-map-stats-entries: "32" + nat-map-stats-interval: 30s + 
node-port-bind-protection: "true" + nodeport-addresses: "" + nodes-gc-interval: 5m0s + operator-api-serve-addr: 127.0.0.1:9234 + policy-cidr-match-mode: "" + policy-default-local-cluster: "false" + preallocate-bpf-maps: "false" + procfs: /host/proc + proxy-connect-timeout: "2" + proxy-idle-timeout-seconds: "60" + proxy-initial-fetch-timeout: "30" + proxy-max-concurrent-retries: "128" + proxy-max-connection-duration-seconds: "0" + proxy-max-requests-per-connection: "0" + proxy-xff-num-trusted-hops-egress: "0" + proxy-xff-num-trusted-hops-ingress: "0" + remove-cilium-node-taints: "true" + routing-mode: tunnel + service-no-backend-response: reject + set-cilium-is-up-condition: "true" + set-cilium-node-taints: "true" + synchronize-k8s-nodes: "true" + tofqdns-dns-reject-response-code: refused + tofqdns-enable-dns-compression: "true" + tofqdns-endpoint-max-ip-per-hostname: "1000" + tofqdns-idle-connection-grace-period: 0s + tofqdns-max-deferred-connection-deletes: "10000" + tofqdns-preallocate-identities: "true" + tofqdns-proxy-response-max-delay: 100ms + tunnel-protocol: vxlan + tunnel-source-port-range: 0-0 + unmanaged-pod-watcher-interval: "15" + vtep-cidr: "" + vtep-endpoint: "" + vtep-mac: "" + vtep-mask: "" + write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist +kind: ConfigMap +metadata: + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: cilium-config + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium + role.kubernetes.io/networking: "1" + name: cilium +rules: +- apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch +- apiGroups: + - 
"" + resources: + - namespaces + - services + - pods + - endpoints + - nodes + - secrets + verbs: + - get + - list + - watch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - list + - watch + - get +- apiGroups: + - cilium.io + resources: + - ciliumloadbalancerippools + - ciliumbgppeeringpolicies + - ciliumbgpnodeconfigs + - ciliumbgpadvertisements + - ciliumbgppeerconfigs + - ciliumclusterwideenvoyconfigs + - ciliumclusterwidenetworkpolicies + - ciliumegressgatewaypolicies + - ciliumendpoints + - ciliumendpointslices + - ciliumenvoyconfigs + - ciliumidentities + - ciliumlocalredirectpolicies + - ciliumnetworkpolicies + - ciliumnodes + - ciliumnodeconfigs + - ciliumcidrgroups + - ciliuml2announcementpolicies + - ciliumpodippools + verbs: + - list + - watch +- apiGroups: + - cilium.io + resources: + - ciliumidentities + - ciliumendpoints + - ciliumnodes + verbs: + - create +- apiGroups: + - cilium.io + resources: + - ciliumidentities + verbs: + - update +- apiGroups: + - cilium.io + resources: + - ciliumendpoints + verbs: + - delete + - get +- apiGroups: + - cilium.io + resources: + - ciliumnodes + - ciliumnodes/status + verbs: + - get + - update +- apiGroups: + - cilium.io + resources: + - ciliumendpoints/status + - ciliumendpoints + - ciliuml2announcementpolicies/status + - ciliumbgpnodeconfigs/status + verbs: + - patch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium + role.kubernetes.io/networking: "1" + name: cilium-operator +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - delete +- apiGroups: + - "" + resourceNames: + - cilium-config + resources: + - configmaps + verbs: + - patch +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - nodes + - nodes/status + 
verbs: + - patch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - services/status + verbs: + - update + - patch +- apiGroups: + - "" + resources: + - namespaces + - secrets + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - services + - endpoints + verbs: + - get + - list + - watch + - create + - update + - delete + - patch +- apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies + - ciliumclusterwidenetworkpolicies + verbs: + - create + - update + - deletecollection + - patch + - get + - list + - watch +- apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies/status + - ciliumclusterwidenetworkpolicies/status + verbs: + - patch + - update +- apiGroups: + - cilium.io + resources: + - ciliumendpoints + - ciliumidentities + verbs: + - delete + - list + - watch +- apiGroups: + - cilium.io + resources: + - ciliumidentities + verbs: + - update +- apiGroups: + - cilium.io + resources: + - ciliumnodes + verbs: + - create + - update + - get + - list + - watch + - delete +- apiGroups: + - cilium.io + resources: + - ciliumnodes/status + verbs: + - update +- apiGroups: + - cilium.io + resources: + - ciliumendpointslices + - ciliumenvoyconfigs + - ciliumbgppeerconfigs + - ciliumbgpadvertisements + - ciliumbgpnodeconfigs + verbs: + - create + - update + - get + - list + - watch + - delete + - patch +- apiGroups: + - cilium.io + resources: + - ciliumbgpclusterconfigs/status + - ciliumbgppeerconfigs/status + verbs: + - update +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - create + - get + - list + - watch +- apiGroups: + - apiextensions.k8s.io + resourceNames: + - ciliumloadbalancerippools.cilium.io + - ciliumbgppeeringpolicies.cilium.io + - ciliumbgpclusterconfigs.cilium.io + - ciliumbgppeerconfigs.cilium.io + - ciliumbgpadvertisements.cilium.io + - ciliumbgpnodeconfigs.cilium.io + - 
ciliumbgpnodeconfigoverrides.cilium.io + - ciliumclusterwideenvoyconfigs.cilium.io + - ciliumclusterwidenetworkpolicies.cilium.io + - ciliumegressgatewaypolicies.cilium.io + - ciliumendpoints.cilium.io + - ciliumendpointslices.cilium.io + - ciliumenvoyconfigs.cilium.io + - ciliumidentities.cilium.io + - ciliumlocalredirectpolicies.cilium.io + - ciliumnetworkpolicies.cilium.io + - ciliumnodes.cilium.io + - ciliumnodeconfigs.cilium.io + - ciliumcidrgroups.cilium.io + - ciliuml2announcementpolicies.cilium.io + - ciliumpodippools.cilium.io + - ciliumgatewayclassconfigs.cilium.io + resources: + - customresourcedefinitions + verbs: + - update +- apiGroups: + - cilium.io + resources: + - ciliumloadbalancerippools + - ciliumpodippools + - ciliumbgppeeringpolicies + - ciliumbgpclusterconfigs + - ciliumbgpnodeconfigoverrides + - ciliumbgppeerconfigs + verbs: + - get + - list + - watch +- apiGroups: + - cilium.io + resources: + - ciliumpodippools + verbs: + - create +- apiGroups: + - cilium.io + resources: + - ciliumloadbalancerippools/status + verbs: + - patch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium + role.kubernetes.io/networking: "1" + name: cilium +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cilium +subjects: +- kind: ServiceAccount + name: cilium + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium + role.kubernetes.io/networking: "1" + name: cilium-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cilium-operator +subjects: +- kind: 
ServiceAccount + name: cilium-operator + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium + role.kubernetes.io/networking: "1" + name: cilium-config-agent + namespace: kube-system +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/part-of: cilium + role.kubernetes.io/networking: "1" + name: cilium-config-agent + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cilium-config-agent +subjects: +- kind: ServiceAccount + name: cilium + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium + k8s-app: cilium + kubernetes.io/cluster-service: "true" + role.kubernetes.io/networking: "1" + name: cilium + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: cilium + kubernetes.io/cluster-service: "true" + template: + metadata: + annotations: + container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites: unconfined + container.apparmor.security.beta.kubernetes.io/cilium-agent: unconfined + container.apparmor.security.beta.kubernetes.io/clean-cilium-state: unconfined + container.apparmor.security.beta.kubernetes.io/mount-cgroup: unconfined + kubectl.kubernetes.io/default-container: cilium-agent + labels: + app.kubernetes.io/name: cilium-agent + app.kubernetes.io/part-of: cilium + k8s-app: cilium + kops.k8s.io/managed-by: kops + kubernetes.io/cluster-service: "true" + spec: + affinity: + podAntiAffinity: + 
requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + k8s-app: cilium + topologyKey: kubernetes.io/hostname + automountServiceAccountToken: true + containers: + - args: + - --config-dir=/tmp/cilium/config-map + command: + - cilium-agent + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_CLUSTERMESH_CONFIG + value: /var/lib/cilium/clustermesh/ + - name: GOMEMLIMIT + valueFrom: + resourceFieldRef: + divisor: "1" + resource: limits.memory + - name: KUBE_CLIENT_BACKOFF_BASE + value: "1" + - name: KUBE_CLIENT_BACKOFF_DURATION + value: "120" + - name: KUBERNETES_SERVICE_HOST + value: api.internal.cilium-gce.example.com + - name: KUBERNETES_SERVICE_PORT + value: "443" + image: quay.io/cilium/cilium:v1.18.6 + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - /cni-uninstall.sh + livenessProbe: + failureThreshold: 10 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: "true" + - name: require-k8s-connectivity + value: "false" + path: /healthz + port: 9879 + scheme: HTTP + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + name: cilium-agent + ports: null + readinessProbe: + failureThreshold: 3 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: "true" + path: /healthz + port: 9879 + scheme: HTTP + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + resources: + requests: + cpu: 25m + memory: 128Mi + securityContext: + capabilities: + add: + - CHOWN + - KILL + - NET_ADMIN + - NET_RAW + - IPC_LOCK + - SYS_MODULE + - SYS_ADMIN + - SYS_RESOURCE + - DAC_OVERRIDE + - FOWNER + - SETGID + - SETUID + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + startupProbe: + failureThreshold: 300 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: "true" + path: /healthz + port: 9879 + 
scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 2 + successThreshold: 1 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /host/proc/sys/net + name: host-proc-sys-net + - mountPath: /host/proc/sys/kernel + name: host-proc-sys-kernel + - mountPath: /sys/fs/bpf + mountPropagation: HostToContainer + name: bpf-maps + - mountPath: /run/cilium/cgroupv2 + name: cilium-cgroup + - mountPath: /var/run/cilium + name: cilium-run + - mountPath: /var/run/cilium/netns + mountPropagation: HostToContainer + name: cilium-netns + - mountPath: /host/etc/cni/net.d + name: etc-cni-netd + - mountPath: /var/lib/cilium/clustermesh + name: clustermesh-secrets + readOnly: true + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + - mountPath: /tmp + name: tmp + hostNetwork: true + initContainers: + - command: + - cilium-dbg + - build-config + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: KUBERNETES_SERVICE_HOST + value: api.internal.cilium-gce.example.com + - name: KUBERNETES_SERVICE_PORT + value: "443" + image: quay.io/cilium/cilium:v1.18.6 + imagePullPolicy: IfNotPresent + name: config + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /tmp + name: tmp + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-mount /hostbin/cilium-mount; + nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT; + rm /hostbin/cilium-mount + env: + - name: CGROUP_ROOT + value: /run/cilium/cgroupv2 + - name: BIN_PATH + value: /opt/cni/bin + image: quay.io/cilium/cilium:v1.18.6 + imagePullPolicy: IfNotPresent + name: mount-cgroup + securityContext: + capabilities: + add: + - SYS_ADMIN + - SYS_CHROOT + - SYS_PTRACE + drop: + - ALL + seLinuxOptions: + level: s0 + type: 
spc_t + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin + name: cni-path + - command: + - sh + - -ec + - | + cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix; + nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix"; + rm /hostbin/cilium-sysctlfix + env: + - name: BIN_PATH + value: /opt/cni/bin + image: quay.io/cilium/cilium:v1.18.6 + imagePullPolicy: IfNotPresent + name: apply-sysctl-overwrites + securityContext: + capabilities: + add: + - SYS_ADMIN + - SYS_CHROOT + - SYS_PTRACE + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /hostproc + name: hostproc + - mountPath: /hostbin + name: cni-path + - args: + - mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf + command: + - /bin/bash + - -c + - -- + image: quay.io/cilium/cilium:v1.18.6 + imagePullPolicy: IfNotPresent + name: mount-bpf-fs + securityContext: + privileged: true + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /sys/fs/bpf + mountPropagation: Bidirectional + name: bpf-maps + - command: + - /init-container.sh + env: + - name: CILIUM_ALL_STATE + valueFrom: + configMapKeyRef: + key: clean-cilium-state + name: cilium-config + optional: true + - name: CILIUM_BPF_STATE + valueFrom: + configMapKeyRef: + key: clean-cilium-bpf-state + name: cilium-config + optional: true + - name: WRITE_CNI_CONF_WHEN_READY + valueFrom: + configMapKeyRef: + key: write-cni-conf-when-ready + name: cilium-config + optional: true + - name: KUBERNETES_SERVICE_HOST + value: api.internal.cilium-gce.example.com + - name: KUBERNETES_SERVICE_PORT + value: "443" + image: quay.io/cilium/cilium:v1.18.6 + imagePullPolicy: IfNotPresent + name: clean-cilium-state + securityContext: + capabilities: + add: + - NET_ADMIN + - SYS_MODULE + - SYS_ADMIN + - SYS_RESOURCE + drop: + - ALL + seLinuxOptions: + level: s0 + 
type: spc_t + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /sys/fs/bpf + name: bpf-maps + - mountPath: /run/cilium/cgroupv2 + mountPropagation: HostToContainer + name: cilium-cgroup + - mountPath: /var/run/cilium + name: cilium-run + - command: + - /install-plugin.sh + image: quay.io/cilium/cilium:v1.18.6 + imagePullPolicy: IfNotPresent + name: install-cni-binaries + resources: + requests: + cpu: 100m + memory: 10Mi + securityContext: + capabilities: + drop: + - ALL + seLinuxOptions: + level: s0 + type: spc_t + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-path + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + restartPolicy: Always + securityContext: + seccompProfile: + type: Unconfined + serviceAccountName: cilium + terminationGracePeriodSeconds: 1 + tolerations: + - operator: Exists + volumes: + - emptyDir: {} + name: tmp + - hostPath: + path: /var/run/cilium + type: DirectoryOrCreate + name: cilium-run + - hostPath: + path: /var/run/netns + type: DirectoryOrCreate + name: cilium-netns + - hostPath: + path: /sys/fs/bpf + type: DirectoryOrCreate + name: bpf-maps + - hostPath: + path: /proc + type: Directory + name: hostproc + - hostPath: + path: /run/cilium/cgroupv2 + type: DirectoryOrCreate + name: cilium-cgroup + - hostPath: + path: /opt/cni/bin + type: DirectoryOrCreate + name: cni-path + - hostPath: + path: /etc/cni/net.d + type: DirectoryOrCreate + name: etc-cni-netd + - hostPath: + path: /lib/modules + name: lib-modules + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + - name: clustermesh-secrets + projected: + defaultMode: 256 + sources: + - secret: + name: cilium-clustermesh + optional: true + - secret: + items: + - key: tls.key + path: common-etcd-client.key + - key: tls.crt + path: common-etcd-client.crt + - key: ca.crt + path: common-etcd-client-ca.crt + name: 
clustermesh-apiserver-remote-cert + optional: true + - secret: + items: + - key: tls.key + path: local-etcd-client.key + - key: tls.crt + path: local-etcd-client.crt + - key: ca.crt + path: local-etcd-client-ca.crt + name: clustermesh-apiserver-local-cert + optional: true + - hostPath: + path: /proc/sys/net + type: Directory + name: host-proc-sys-net + - hostPath: + path: /proc/sys/kernel + type: Directory + name: host-proc-sys-kernel + updateStrategy: + type: OnDelete + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + app.kubernetes.io/name: cilium-operator + app.kubernetes.io/part-of: cilium + io.cilium/app: operator + name: cilium-operator + role.kubernetes.io/networking: "1" + name: cilium-operator + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + io.cilium/app: operator + name: cilium-operator + strategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 50% + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: cilium-operator + app.kubernetes.io/part-of: cilium + io.cilium/app: operator + kops.k8s.io/managed-by: kops + name: cilium-operator + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + io.cilium/app: operator + topologyKey: kubernetes.io/hostname + automountServiceAccountToken: true + containers: + - args: + - --config-dir=/tmp/cilium/config-map + - --debug=$(CILIUM_DEBUG) + - --eni-tags=KubernetesCluster=cilium-gce.example.com + command: + - cilium-operator + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_DEBUG + valueFrom: + configMapKeyRef: + key: debug + name: cilium-config + optional: true + - name: KUBERNETES_SERVICE_HOST + value: 
api.internal.cilium-gce.example.com + - name: KUBERNETES_SERVICE_PORT + value: "443" + image: quay.io/cilium/operator:v1.18.6 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + host: 127.0.0.1 + path: /healthz + port: 9234 + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 3 + name: cilium-operator + readinessProbe: + failureThreshold: 5 + httpGet: + host: 127.0.0.1 + path: /healthz + port: 9234 + scheme: HTTP + initialDelaySeconds: 0 + periodSeconds: 5 + timeoutSeconds: 3 + resources: + requests: + cpu: 25m + memory: 128Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /tmp/cilium/config-map + name: cilium-config-path + readOnly: true + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + restartPolicy: Always + securityContext: + seccompProfile: + type: RuntimeDefault + serviceAccountName: cilium-operator + tolerations: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - key: node-role.kubernetes.io/master + operator: Exists + - key: node.kubernetes.io/not-ready + operator: Exists + - key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - key: node.cilium.io/agent-not-ready + operator: Exists + volumes: + - configMap: + name: cilium-config + name: cilium-config-path diff --git a/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_cilium-gce.example.com-addons-storage-gce.addons.k8s.io-v1.7.0_content b/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_cilium-gce.example.com-addons-storage-gce.addons.k8s.io-v1.7.0_content new file mode 100644 index 0000000000000..e109954baeefa --- /dev/null +++ b/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_cilium-gce.example.com-addons-storage-gce.addons.k8s.io-v1.7.0_content @@ -0,0 +1,15 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass 
+metadata: + annotations: + storageclass.kubernetes.io/is-default-class: "true" + labels: + addon.kops.k8s.io/name: storage-gce.addons.k8s.io + addonmanager.kubernetes.io/mode: EnsureExists + app.kubernetes.io/managed-by: kops + k8s-addon: storage-gce.addons.k8s.io + kubernetes.io/cluster-service: "true" + name: standard +parameters: + type: pd-standard +provisioner: kubernetes.io/gce-pd diff --git a/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_cluster-completed.spec_content new file mode 100644 index 0000000000000..1973963b95e73 --- /dev/null +++ b/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_cluster-completed.spec_content @@ -0,0 +1,242 @@ +apiVersion: kops.k8s.io/v1alpha2 +kind: Cluster +metadata: + creationTimestamp: "2017-01-01T00:00:00Z" + name: cilium-gce.example.com +spec: + api: + dns: {} + authorization: + rbac: {} + channel: stable + cloudConfig: + gceServiceAccount: default + gcpPDCSIDriver: + defaultStorageClassName: balanced-csi + enabled: false + version: v1.22.1 + manageStorageClasses: true + multizone: true + nodeTags: cilium-gce-example-com-k8s-io-role-node + cloudControllerManager: + allocateNodeCIDRs: true + cidrAllocatorType: CloudAllocator + clusterCIDR: 10.4.0.0/14 + clusterName: cilium-gce-example-com + configureCloudRoutes: false + controllers: + - '*' + image: registry.k8s.io/cloud-provider-gcp/cloud-controller-manager:v35.0.0 + leaderElection: + leaderElect: true + cloudProvider: gce + clusterDNSDomain: cluster.local + configBase: memfs://tests/cilium-gce.example.com + containerd: + logLevel: info + runc: + version: 1.3.4 + sandboxImage: registry.k8s.io/pause:3.10.1 + version: 2.1.6 + dnsZone: "1" + etcdClusters: + - backups: + backupStore: memfs://tests/cilium-gce.example.com/backups/etcd/main + cpuRequest: 200m + etcdMembers: + - instanceGroup: master-us-test1-a + name: a + manager: + backupRetentionDays: 
90 + memoryRequest: 100Mi + name: main + version: 3.5.25 + - backups: + backupStore: memfs://tests/cilium-gce.example.com/backups/etcd/events + cpuRequest: 100m + etcdMembers: + - instanceGroup: master-us-test1-a + name: a + manager: + backupRetentionDays: 90 + memoryRequest: 100Mi + name: events + version: 3.5.25 + externalDns: + provider: dns-controller + iam: + legacy: false + keyStore: memfs://tests/cilium-gce.example.com/pki + kubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 1 + authorizationMode: Node,RBAC + bindAddress: 0.0.0.0 + cloudProvider: external + enableAdmissionPlugins: + - DefaultStorageClass + - DefaultTolerationSeconds + - LimitRanger + - MutatingAdmissionWebhook + - NamespaceLifecycle + - NodeRestriction + - ResourceQuota + - RuntimeClass + - ServiceAccount + - ValidatingAdmissionPolicy + - ValidatingAdmissionWebhook + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + image: registry.k8s.io/kube-apiserver:v1.32.0 + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://api.internal.cilium-gce.example.com + serviceAccountJWKSURI: https://api.internal.cilium-gce.example.com/openid/v1/jwks + serviceClusterIPRange: 10.1.0.0/16 + storageBackend: etcd3 + kubeControllerManager: + allocateNodeCIDRs: false + attachDetachReconcileSyncPeriod: 1m0s + cloudProvider: external + clusterCIDR: 10.4.0.0/14 + clusterName: cilium-gce.example.com + configureCloudRoutes: false + image: registry.k8s.io/kube-controller-manager:v1.32.0 + leaderElection: + leaderElect: true + logLevel: 2 + useServiceAccountCredentials: true + kubeDNS: + cacheMaxConcurrent: 150 + 
cacheMaxSize: 1000 + cpuRequest: 100m + domain: cluster.local + memoryLimit: 170Mi + memoryRequest: 70Mi + nodeLocalDNS: + cpuRequest: 25m + enabled: false + image: registry.k8s.io/dns/k8s-dns-node-cache:1.26.0 + memoryRequest: 5Mi + provider: CoreDNS + serverIP: 10.1.0.10 + kubeProxy: + clusterCIDR: 10.4.0.0/14 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.32.0 + logLevel: 2 + kubeScheduler: + image: registry.k8s.io/kube-scheduler:v1.32.0 + leaderElection: + leaderElect: true + logLevel: 2 + kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: external + clusterDNS: 10.1.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + hairpinMode: promiscuous-bridge + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + kubernetesApiAccess: + - 0.0.0.0/0 + - ::/0 + kubernetesVersion: 1.32.0 + masterKubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: external + clusterDNS: 10.1.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + hairpinMode: promiscuous-bridge + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + masterPublicName: api.cilium-gce.example.com + networking: + gce: + cilium: + agentPrometheusPort: 9090 + bpfCTGlobalAnyMax: 262144 + bpfCTGlobalTCPMax: 524288 + bpfLBAlgorithm: random + bpfLBMaglevTableSize: "16381" + bpfLBMapMax: 65536 + bpfNATGlobalMax: 
524288 + bpfNeighGlobalMax: 524288 + bpfPolicyMapMax: 16384 + clusterName: default + cniExclusive: true + cpuRequest: 25m + disableCNPStatusUpdates: true + disableMasquerade: false + enableBPFMasquerade: false + enableEndpointHealthChecking: true + enableL7Proxy: true + enableLocalRedirectPolicy: false + enableRemoteNodeIdentity: true + enableUnreachableRoutes: false + gatewayAPI: + enabled: false + hubble: + enabled: false + identityAllocationMode: crd + identityChangeGracePeriod: 5s + ingress: + enabled: false + ipam: kubernetes + memoryRequest: 128Mi + monitorAggregation: medium + sidecarIstioProxyImage: cilium/istio_proxy + toFqdnsDnsRejectResponseCode: refused + tunnel: vxlan + version: v1.18.6 + nonMasqueradeCIDR: 10.0.0.0/8 + podCIDR: 10.4.0.0/14 + project: testproject + secretStore: memfs://tests/cilium-gce.example.com/secrets + serviceClusterIPRange: 10.1.0.0/16 + sshAccess: + - 0.0.0.0/0 + - ::/0 + subnets: + - cidr: 10.0.32.0/19 + name: us-test1 + region: us-test1 + type: Public + topology: + dns: + type: Public diff --git a/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_etcd-cluster-spec-events_content b/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_etcd-cluster-spec-events_content new file mode 100644 index 0000000000000..fb8835d7c40e7 --- /dev/null +++ b/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_etcd-cluster-spec-events_content @@ -0,0 +1,4 @@ +{ + "memberCount": 1, + "etcdVersion": "3.5.25" +} diff --git a/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_etcd-cluster-spec-main_content b/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_etcd-cluster-spec-main_content new file mode 100644 index 0000000000000..fb8835d7c40e7 --- /dev/null +++ b/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_etcd-cluster-spec-main_content @@ -0,0 +1,4 @@ +{ + "memberCount": 1, + "etcdVersion": "3.5.25" +} diff --git 
a/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_kops-version.txt_content b/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_kops-version.txt_content new file mode 100644 index 0000000000000..86942cf6d89c9 --- /dev/null +++ b/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_kops-version.txt_content @@ -0,0 +1 @@ +1.34.0-beta.1 diff --git a/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_manifests-etcdmanager-events-master-us-test1-a_content b/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_manifests-etcdmanager-events-master-us-test1-a_content new file mode 100644 index 0000000000000..314faf556d212 --- /dev/null +++ b/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_manifests-etcdmanager-events-master-us-test1-a_content @@ -0,0 +1,165 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + k8s-app: etcd-manager-events + name: etcd-manager-events + namespace: kube-system +spec: + containers: + - command: + - /bin/sh + - -c + - mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /ko-app/etcd-manager + --backup-store=memfs://tests/cilium-gce.example.com/backups/etcd/events --client-urls=https://__name__:4002 + --cluster-name=etcd-events --containerized=true --dns-suffix=.internal.cilium-gce.example.com + --grpc-port=3997 --peer-urls=https://__name__:2381 --quarantine-client-urls=https://__name__:3995 + --v=6 --volume-name-tag=k8s-io-etcd-events --volume-provider=gce --volume-tag=k8s-io-cluster-name=cilium-gce-example-com + --volume-tag=k8s-io-etcd-events --volume-tag=k8s-io-role-master=master > /tmp/pipe + 2>&1 + env: + - name: ETCD_MANAGER_DAILY_BACKUPS_RETENTION + value: 90d + image: registry.k8s.io/etcd-manager/etcd-manager-slim:v3.0.20260227 + name: etcd-manager + resources: + requests: + cpu: 100m + memory: 100Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /rootfs + name: rootfs + - mountPath: /run + name: run + - mountPath: 
/etc/kubernetes/pki/etcd-manager + name: pki + - mountPath: /opt + name: opt + - mountPath: /var/log/etcd.log + name: varlogetcd + hostNetwork: true + hostPID: true + initContainers: + - args: + - --target-dir=/opt/kops-utils/ + - --src=/ko-app/kops-utils-cp + command: + - /ko-app/kops-utils-cp + image: registry.k8s.io/kops/kops-utils-cp:1.35.0-beta.1 + name: kops-utils-cp + resources: {} + volumeMounts: + - mountPath: /opt + name: opt + - args: + - --target-dir=/opt/etcd-v3.4.13 + - --src=/usr/local/bin/etcd + - --src=/usr/local/bin/etcdctl + command: + - /opt/kops-utils/kops-utils-cp + image: registry.k8s.io/etcd:v3.4.13 + name: init-etcd-3-4-13 + resources: {} + volumeMounts: + - mountPath: /opt + name: opt + - args: + - --target-dir=/opt/etcd-v3.5.25 + - --src=/usr/local/bin/etcd + - --src=/usr/local/bin/etcdctl + command: + - /opt/kops-utils/kops-utils-cp + image: registry.k8s.io/etcd:v3.5.25 + name: init-etcd-3-5-25 + resources: {} + volumeMounts: + - mountPath: /opt + name: opt + - args: + - --target-dir=/opt/etcd-v3.6.6 + - --src=/usr/local/bin/etcd + - --src=/usr/local/bin/etcdctl + command: + - /opt/kops-utils/kops-utils-cp + image: registry.k8s.io/etcd:v3.6.6 + name: init-etcd-3-6-6 + resources: {} + volumeMounts: + - mountPath: /opt + name: opt + - args: + - --symlink + - --target-dir=/opt/etcd-v3.4.3 + - --src=/opt/etcd-v3.4.13/etcd + - --src=/opt/etcd-v3.4.13/etcdctl + command: + - /opt/kops-utils/kops-utils-cp + image: registry.k8s.io/kops/kops-utils-cp:1.35.0-beta.1 + name: init-etcd-symlinks-3-4-13 + resources: {} + volumeMounts: + - mountPath: /opt + name: opt + - args: + - --symlink + - --target-dir=/opt/etcd-v3.5.0 + - --target-dir=/opt/etcd-v3.5.1 + - --target-dir=/opt/etcd-v3.5.13 + - --target-dir=/opt/etcd-v3.5.17 + - --target-dir=/opt/etcd-v3.5.21 + - --target-dir=/opt/etcd-v3.5.23 + - --target-dir=/opt/etcd-v3.5.24 + - --target-dir=/opt/etcd-v3.5.3 + - --target-dir=/opt/etcd-v3.5.4 + - --target-dir=/opt/etcd-v3.5.6 + - 
--target-dir=/opt/etcd-v3.5.7 + - --target-dir=/opt/etcd-v3.5.9 + - --src=/opt/etcd-v3.5.25/etcd + - --src=/opt/etcd-v3.5.25/etcdctl + command: + - /opt/kops-utils/kops-utils-cp + image: registry.k8s.io/kops/kops-utils-cp:1.35.0-beta.1 + name: init-etcd-symlinks-3-5-25 + resources: {} + volumeMounts: + - mountPath: /opt + name: opt + - args: + - --symlink + - --target-dir=/opt/etcd-v3.6.5 + - --src=/opt/etcd-v3.6.6/etcd + - --src=/opt/etcd-v3.6.6/etcdctl + command: + - /opt/kops-utils/kops-utils-cp + image: registry.k8s.io/kops/kops-utils-cp:1.35.0-beta.1 + name: init-etcd-symlinks-3-6-6 + resources: {} + volumeMounts: + - mountPath: /opt + name: opt + priorityClassName: system-cluster-critical + tolerations: + - key: CriticalAddonsOnly + operator: Exists + volumes: + - hostPath: + path: / + type: Directory + name: rootfs + - hostPath: + path: /run + type: DirectoryOrCreate + name: run + - hostPath: + path: /etc/kubernetes/pki/etcd-manager-events + type: DirectoryOrCreate + name: pki + - emptyDir: {} + name: opt + - hostPath: + path: /var/log/etcd-events.log + type: FileOrCreate + name: varlogetcd +status: {} diff --git a/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_manifests-etcdmanager-main-master-us-test1-a_content b/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_manifests-etcdmanager-main-master-us-test1-a_content new file mode 100644 index 0000000000000..1286851a1d88b --- /dev/null +++ b/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_manifests-etcdmanager-main-master-us-test1-a_content @@ -0,0 +1,165 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + k8s-app: etcd-manager-main + name: etcd-manager-main + namespace: kube-system +spec: + containers: + - command: + - /bin/sh + - -c + - mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /ko-app/etcd-manager + --backup-store=memfs://tests/cilium-gce.example.com/backups/etcd/main --client-urls=https://__name__:4001 + --cluster-name=etcd 
--containerized=true --dns-suffix=.internal.cilium-gce.example.com + --grpc-port=3996 --peer-urls=https://__name__:2380 --quarantine-client-urls=https://__name__:3994 + --v=6 --volume-name-tag=k8s-io-etcd-main --volume-provider=gce --volume-tag=k8s-io-cluster-name=cilium-gce-example-com + --volume-tag=k8s-io-etcd-main --volume-tag=k8s-io-role-master=master > /tmp/pipe + 2>&1 + env: + - name: ETCD_MANAGER_DAILY_BACKUPS_RETENTION + value: 90d + image: registry.k8s.io/etcd-manager/etcd-manager-slim:v3.0.20260227 + name: etcd-manager + resources: + requests: + cpu: 200m + memory: 100Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /rootfs + name: rootfs + - mountPath: /run + name: run + - mountPath: /etc/kubernetes/pki/etcd-manager + name: pki + - mountPath: /opt + name: opt + - mountPath: /var/log/etcd.log + name: varlogetcd + hostNetwork: true + hostPID: true + initContainers: + - args: + - --target-dir=/opt/kops-utils/ + - --src=/ko-app/kops-utils-cp + command: + - /ko-app/kops-utils-cp + image: registry.k8s.io/kops/kops-utils-cp:1.35.0-beta.1 + name: kops-utils-cp + resources: {} + volumeMounts: + - mountPath: /opt + name: opt + - args: + - --target-dir=/opt/etcd-v3.4.13 + - --src=/usr/local/bin/etcd + - --src=/usr/local/bin/etcdctl + command: + - /opt/kops-utils/kops-utils-cp + image: registry.k8s.io/etcd:v3.4.13 + name: init-etcd-3-4-13 + resources: {} + volumeMounts: + - mountPath: /opt + name: opt + - args: + - --target-dir=/opt/etcd-v3.5.25 + - --src=/usr/local/bin/etcd + - --src=/usr/local/bin/etcdctl + command: + - /opt/kops-utils/kops-utils-cp + image: registry.k8s.io/etcd:v3.5.25 + name: init-etcd-3-5-25 + resources: {} + volumeMounts: + - mountPath: /opt + name: opt + - args: + - --target-dir=/opt/etcd-v3.6.6 + - --src=/usr/local/bin/etcd + - --src=/usr/local/bin/etcdctl + command: + - /opt/kops-utils/kops-utils-cp + image: registry.k8s.io/etcd:v3.6.6 + name: init-etcd-3-6-6 + resources: {} + volumeMounts: + - mountPath: /opt + 
name: opt + - args: + - --symlink + - --target-dir=/opt/etcd-v3.4.3 + - --src=/opt/etcd-v3.4.13/etcd + - --src=/opt/etcd-v3.4.13/etcdctl + command: + - /opt/kops-utils/kops-utils-cp + image: registry.k8s.io/kops/kops-utils-cp:1.35.0-beta.1 + name: init-etcd-symlinks-3-4-13 + resources: {} + volumeMounts: + - mountPath: /opt + name: opt + - args: + - --symlink + - --target-dir=/opt/etcd-v3.5.0 + - --target-dir=/opt/etcd-v3.5.1 + - --target-dir=/opt/etcd-v3.5.13 + - --target-dir=/opt/etcd-v3.5.17 + - --target-dir=/opt/etcd-v3.5.21 + - --target-dir=/opt/etcd-v3.5.23 + - --target-dir=/opt/etcd-v3.5.24 + - --target-dir=/opt/etcd-v3.5.3 + - --target-dir=/opt/etcd-v3.5.4 + - --target-dir=/opt/etcd-v3.5.6 + - --target-dir=/opt/etcd-v3.5.7 + - --target-dir=/opt/etcd-v3.5.9 + - --src=/opt/etcd-v3.5.25/etcd + - --src=/opt/etcd-v3.5.25/etcdctl + command: + - /opt/kops-utils/kops-utils-cp + image: registry.k8s.io/kops/kops-utils-cp:1.35.0-beta.1 + name: init-etcd-symlinks-3-5-25 + resources: {} + volumeMounts: + - mountPath: /opt + name: opt + - args: + - --symlink + - --target-dir=/opt/etcd-v3.6.5 + - --src=/opt/etcd-v3.6.6/etcd + - --src=/opt/etcd-v3.6.6/etcdctl + command: + - /opt/kops-utils/kops-utils-cp + image: registry.k8s.io/kops/kops-utils-cp:1.35.0-beta.1 + name: init-etcd-symlinks-3-6-6 + resources: {} + volumeMounts: + - mountPath: /opt + name: opt + priorityClassName: system-cluster-critical + tolerations: + - key: CriticalAddonsOnly + operator: Exists + volumes: + - hostPath: + path: / + type: Directory + name: rootfs + - hostPath: + path: /run + type: DirectoryOrCreate + name: run + - hostPath: + path: /etc/kubernetes/pki/etcd-manager-main + type: DirectoryOrCreate + name: pki + - emptyDir: {} + name: opt + - hostPath: + path: /var/log/etcd.log + type: FileOrCreate + name: varlogetcd +status: {} diff --git a/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content 
b/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content new file mode 100644 index 0000000000000..7867d3e53581a --- /dev/null +++ b/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content @@ -0,0 +1,32 @@ +apiVersion: v1 +kind: Pod +metadata: {} +spec: + containers: + - args: + - --ca-cert=/secrets/ca.crt + - --client-cert=/secrets/client.crt + - --client-key=/secrets/client.key + image: registry.k8s.io/kops/kube-apiserver-healthcheck:1.34.0-beta.1 + livenessProbe: + httpGet: + host: 127.0.0.1 + path: /.kube-apiserver-healthcheck/healthz + port: 3990 + initialDelaySeconds: 5 + timeoutSeconds: 5 + name: healthcheck + resources: {} + securityContext: + runAsNonRoot: true + runAsUser: 10012 + volumeMounts: + - mountPath: /secrets + name: healthcheck-secrets + readOnly: true + volumes: + - hostPath: + path: /etc/kubernetes/kube-apiserver-healthcheck/secrets + type: Directory + name: healthcheck-secrets +status: {} diff --git a/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_nodeupconfig-master-us-test1-a_content b/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_nodeupconfig-master-us-test1-a_content new file mode 100644 index 0000000000000..b160b2edf1b69 --- /dev/null +++ b/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_nodeupconfig-master-us-test1-a_content @@ -0,0 +1,362 @@ +APIServerConfig: + API: + dns: {} + publicName: api.cilium-gce.example.com + ClusterDNSDomain: cluster.local + KubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 1 + authorizationMode: Node,RBAC + bindAddress: 0.0.0.0 + cloudProvider: external + enableAdmissionPlugins: + - DefaultStorageClass + - DefaultTolerationSeconds + - LimitRanger + - MutatingAdmissionWebhook + - NamespaceLifecycle + - NodeRestriction + - ResourceQuota + - RuntimeClass + - ServiceAccount + 
- ValidatingAdmissionPolicy + - ValidatingAdmissionWebhook + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + image: registry.k8s.io/kube-apiserver:v1.32.0 + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://api.internal.cilium-gce.example.com + serviceAccountJWKSURI: https://api.internal.cilium-gce.example.com/openid/v1/jwks + serviceClusterIPRange: 10.1.0.0/16 + storageBackend: etcd3 + ServiceAccountPublicKeys: | + -----BEGIN RSA PUBLIC KEY----- + MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBANiW3hfHTcKnxCig+uWhpVbOfH1pANKm + XVSysPKgE80QSU4tZ6m49pAEeIMsvwvDMaLsb2v6JvXe0qvCmueU+/sCAwEAAQ== + -----END RSA PUBLIC KEY----- + -----BEGIN RSA PUBLIC KEY----- + MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKOE64nZbH+GM91AIrqf7HEk4hvzqsZF + Ftxc+8xir1XC3mI/RhCCrs6AdVRZNZ26A6uHArhi33c2kHQkCjyLA7sCAwEAAQ== + -----END RSA PUBLIC KEY----- +Assets: + amd64: + - 5ad4965598773d56a37a8e8429c3dc3d86b4c5c26d8417ab333ae345c053dae2@https://dl.k8s.io/release/v1.32.0/bin/linux/amd64/kubelet + - 646d58f6d98ee670a71d9cdffbf6625aeea2849d567f214bc43a35f8ccb7bf70@https://dl.k8s.io/release/v1.32.0/bin/linux/amd64/kubectl + - eca252d94176f8e08084433d08cd478c28cba7b773b49d691f1bec0f1e94e7d1@https://dl.k8s.io/release/v1.32.0/bin/linux/amd64/mounter + - b5852509ab5c950485794afd49a3fd1c3d6166db988c15b683d2373b7055300f@https://artifacts.k8s.io/binaries/cloud-provider-gcp/v35.0.0/auth-provider-gcp/linux/amd64/auth-provider-gcp + - 4793dc5c1f34ebf8402990d0050f3c294aa3c794cd5a4baa403c1cf10602326d@https://github.com/containerd/containerd/releases/download/v2.1.6/containerd-2.1.6-linux-amd64.tar.gz + - 
5966ca40b6187b30e33bfc299c5f1fe72e8c1aa01cf3fefdadf391668f47f103@https://github.com/opencontainers/runc/releases/download/v1.3.4/runc.amd64 + - 86189e1e8de9692eb02daf2f06db8495f687ce2c4ba09a6b64f135990dfb315d@https://artifacts.k8s.io/binaries/kops/1.34.0-beta.1/linux/amd64/protokube,https://github.com/kubernetes/kops/releases/download/v1.34.0-beta.1/protokube-linux-amd64 + - 0172d3c560aebe1eb4e8599f71c0d8fc68e4eca880add8031de41c8057ca8e3c@https://artifacts.k8s.io/binaries/kops/1.34.0-beta.1/linux/amd64/channels,https://github.com/kubernetes/kops/releases/download/v1.34.0-beta.1/channels-linux-amd64 + arm64: + - bda9b2324c96693b38c41ecea051bab4c7c434be5683050b5e19025b50dbc0bf@https://dl.k8s.io/release/v1.32.0/bin/linux/arm64/kubelet + - ba4004f98f3d3a7b7d2954ff0a424caa2c2b06b78c17b1dccf2acc76a311a896@https://dl.k8s.io/release/v1.32.0/bin/linux/arm64/kubectl + - ee06cd4a0e8428a3cced77f4f7db836138c589e8e4bf46f0c676f8ff4b54b942@https://dl.k8s.io/release/v1.32.0/bin/linux/arm64/mounter + - 510aee27ef32a27b76e461e9084719365210d56166057b37db290a72dfecbffa@https://artifacts.k8s.io/binaries/cloud-provider-gcp/v35.0.0/auth-provider-gcp/linux/arm64/auth-provider-gcp + - 88d6e32348c36628c8500a630c6dd4b3cb8c680b1d18dc8d1d19041f67757c6e@https://github.com/containerd/containerd/releases/download/v2.1.6/containerd-2.1.6-linux-arm64.tar.gz + - d6dcab36d1b6af1b72c7f0662e5fcf446a291271ba6006532b95c4144e19d428@https://github.com/opencontainers/runc/releases/download/v1.3.4/runc.arm64 + - 25b57b0555fad42e5762246334681bf1c943794fcecdb680a79e482be5c08815@https://artifacts.k8s.io/binaries/kops/1.34.0-beta.1/linux/arm64/protokube,https://github.com/kubernetes/kops/releases/download/v1.34.0-beta.1/protokube-linux-arm64 + - 04470f8313796032fce85b974da4fc26420f36931e574fff6d117d21caf22770@https://artifacts.k8s.io/binaries/kops/1.34.0-beta.1/linux/arm64/channels,https://github.com/kubernetes/kops/releases/download/v1.34.0-beta.1/channels-linux-arm64 +CAs: + apiserver-aggregator-ca: | + 
-----BEGIN CERTIFICATE----- + MIIBgjCCASygAwIBAgIMFo3gINaZLHjisEcbMA0GCSqGSIb3DQEBCwUAMCIxIDAe + BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIxMDYzMDA0NTExMloX + DTMxMDYzMDA0NTExMlowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It + Y2EwXDANBgkqhkiG9w0BAQEFAANLADBIAkEAyyE71AOU3go5XFegLQ6fidI0LhhM + x7CzpTzh2xWKcHUfbNI7itgJvC/+GlyG5W+DF5V7ba0IJiQLsFve0oLdewIDAQAB + o0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU + ALfqF5ZmfqvqORuJIFilZYKF3d0wDQYJKoZIhvcNAQELBQADQQAHAomFKsF4jvYX + WM/UzQXDj9nSAFTf8dBPCXyZZNotsOH7+P6W4mMiuVs8bAuGiXGUdbsQ2lpiT/Rk + CzMeMdr4 + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBgjCCASygAwIBAgIMFo3gM0nxQpiX/agfMA0GCSqGSIb3DQEBCwUAMCIxIDAe + BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIxMDYzMDA0NTIzMVoX + DTMxMDYzMDA0NTIzMVowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It + Y2EwXDANBgkqhkiG9w0BAQEFAANLADBIAkEAyyE71AOU3go5XFegLQ6fidI0LhhM + x7CzpTzh2xWKcHUfbNI7itgJvC/+GlyG5W+DF5V7ba0IJiQLsFve0oLdewIDAQAB + o0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU + ALfqF5ZmfqvqORuJIFilZYKF3d0wDQYJKoZIhvcNAQELBQADQQCXsoezoxXu2CEN + QdlXZOfmBT6cqxIX/RMHXhpHwRiqPsTO8IO2bVA8CSzxNwMuSv/ZtrMHoh8+PcVW + HLtkTXH8 + -----END CERTIFICATE----- + etcd-clients-ca: | + -----BEGIN CERTIFICATE----- + MIIBcjCCARygAwIBAgIMFo1ogHnr26DL9YkqMA0GCSqGSIb3DQEBCwUAMBoxGDAW + BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMTA2MjgxNjE5MDFaFw0zMTA2Mjgx + NjE5MDFaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTBcMA0GCSqGSIb3DQEB + AQUAA0sAMEgCQQDYlt4Xx03Cp8QooPrloaVWznx9aQDSpl1UsrDyoBPNEElOLWep + uPaQBHiDLL8LwzGi7G9r+ib13tKrwprnlPv7AgMBAAGjQjBAMA4GA1UdDwEB/wQE + AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQjlt4Ue54AbJPWlDpRM51s + x+PeBDANBgkqhkiG9w0BAQsFAANBAAZAdf8ROEVkr3Rf7I+s+CQOil2toadlKWOY + qCeJ2XaEROfp9aUTEIU1MGM3g57MPyAPPU7mURskuOQz6B1UFaY= + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBcjCCARygAwIBAgIMFo1olfBnC/CsT+dqMA0GCSqGSIb3DQEBCwUAMBoxGDAW + BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMTA2MjgxNjIwMzNaFw0zMTA2Mjgx + 
NjIwMzNaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTBcMA0GCSqGSIb3DQEB + AQUAA0sAMEgCQQDYlt4Xx03Cp8QooPrloaVWznx9aQDSpl1UsrDyoBPNEElOLWep + uPaQBHiDLL8LwzGi7G9r+ib13tKrwprnlPv7AgMBAAGjQjBAMA4GA1UdDwEB/wQE + AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQjlt4Ue54AbJPWlDpRM51s + x+PeBDANBgkqhkiG9w0BAQsFAANBAF1xUz77PlUVUnd9duF8F7plou0TONC9R6/E + YQ8C6vM1b+9NSDGjCW8YmwEU2fBgskb/BBX2lwVZ32/RUEju4Co= + -----END CERTIFICATE----- + etcd-manager-ca-events: | + -----BEGIN CERTIFICATE----- + MIIBgDCCASqgAwIBAgIMFo+bKjm04vB4rNtaMA0GCSqGSIb3DQEBCwUAMCExHzAd + BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjEwNzA1MjAwOTU2WhcN + MzEwNzA1MjAwOTU2WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz + MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKiC8tndMlEFZ7qzeKxeKqFVjaYpsh/H + g7RxWo15+1kgH3suO0lxp9+RxSVv97hnsfbySTPZVhy2cIQj7eZtZt8CAwEAAaNC + MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFBg6 + CEZkQNnRkARBwFce03AEWa+sMA0GCSqGSIb3DQEBCwUAA0EAJMnBThok/uUe8q8O + sS5q19KUuE8YCTUzMDj36EBKf6NX4NoakCa1h6kfQVtlMtEIMWQZCjbm8xGK5ffs + GS/VUw== + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBgDCCASqgAwIBAgIMFo+bQ+EgIiBmGghjMA0GCSqGSIb3DQEBCwUAMCExHzAd + BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjEwNzA1MjAxMTQ2WhcN + MzEwNzA1MjAxMTQ2WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz + MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKFhHVVxxDGv8d1jBvtdSxz7KIVoBOjL + DMxsmTsINiQkTQaFlb+XPlnY1ar4+RhE519AFUkqfhypk4Zxqf1YFXUCAwEAAaNC + MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNuW + LLH5c8kDubDbr6BHgedW0iJ9MA0GCSqGSIb3DQEBCwUAA0EAiKUoBoaGu7XzboFE + hjfKlX0TujqWuW3qMxDEJwj4dVzlSLrAoB/G01MJ+xxYKh456n48aG6N827UPXhV + cPfVNg== + -----END CERTIFICATE----- + etcd-manager-ca-main: | + -----BEGIN CERTIFICATE----- + MIIBfDCCASagAwIBAgIMFo+bKjm1c3jfv6hIMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIxMDcwNTIwMDk1NloXDTMx + MDcwNTIwMDk1NlowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wXDAN + BgkqhkiG9w0BAQEFAANLADBIAkEAxbkDbGYmCSShpRG3r+lzTOFujyuruRfjOhYm + 
ZRX4w1Utd5y63dUc98sjc9GGUYMHd+0k1ql/a48tGhnK6N6jJwIDAQABo0IwQDAO + BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWZLkbBFx + GAgPU4i62c52unSo7RswDQYJKoZIhvcNAQELBQADQQAj6Pgd0va/8FtkyMlnohLu + Gf4v8RJO6zk3Y6jJ4+cwWziipFM1ielMzSOZfFcCZgH3m5Io40is4hPSqyq2TOA6 + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBfDCCASagAwIBAgIMFo+bQ+Eg8Si30gr4MA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIxMDcwNTIwMTE0NloXDTMx + MDcwNTIwMTE0NlowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wXDAN + BgkqhkiG9w0BAQEFAANLADBIAkEAw33jzcd/iosN04b0WXbDt7B0c3sJ3aafcGLP + vG3xRB9N5bYr9+qZAq3mzAFkxscn4j1ce5b1/GKTDEAClmZgdQIDAQABo0IwQDAO + BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUE/h+3gDP + DvKwHRyiYlXM8voZ1wowDQYJKoZIhvcNAQELBQADQQBXuimeEoAOu5HN4hG7NqL9 + t40K3ZRhRZv3JQWnRVJCBDjg1rD0GQJR/n+DoWvbeijI5C9pNjr2pWSIYR1eYCvd + -----END CERTIFICATE----- + etcd-peers-ca-events: | + -----BEGIN CERTIFICATE----- + MIIBfDCCASagAwIBAgIMFo+bKjmxTPh3/lYJMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIxMDcwNTIwMDk1NloXDTMx + MDcwNTIwMDk1NlowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwXDAN + BgkqhkiG9w0BAQEFAANLADBIAkEAv5g4HF2xmrYyouJfY9jXx1M3gPLD/pupvxPY + xyjJw5pNCy5M5XGS3iTqRD5RDE0fWudVHFZKLIe8WPc06NApXwIDAQABo0IwQDAO + BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUf6xiDI+O + Yph1ziCGr2hZaQYt+fUwDQYJKoZIhvcNAQELBQADQQBBxj5hqEQstonTb8lnqeGB + DEYtUeAk4eR/HzvUMjF52LVGuvN3XVt+JTrFeKNvb6/RDUbBNRj3azalcUkpPh6V + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBfDCCASagAwIBAgIMFo+bQ+Eq69jgzpKwMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIxMDcwNTIwMTE0NloXDTMx + MDcwNTIwMTE0NlowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwXDAN + BgkqhkiG9w0BAQEFAANLADBIAkEAo5Nj2CjX1qp3mEPw1H5nHAFWLoGNSLSlRFJW + 03NxaNPMFzL5PrCoyOXrX8/MWczuZYw0Crf8EPOOQWi2+W0XLwIDAQABo0IwQDAO + BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUxauhhKQh + 
cvdZND78rHe0RQVTTiswDQYJKoZIhvcNAQELBQADQQB+cq4jIS9q0zXslaRa+ViI + J+dviA3sMygbmSJO0s4DxYmoazKJblux5q0ASSvS9iL1l9ShuZ1dWyp2tpZawHyb + -----END CERTIFICATE----- + etcd-peers-ca-main: | + -----BEGIN CERTIFICATE----- + MIIBeDCCASKgAwIBAgIMFo+bKjmuLDDLcDHsMA0GCSqGSIb3DQEBCwUAMB0xGzAZ + BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMTA3MDUyMDA5NTZaFw0zMTA3 + MDUyMDA5NTZaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjBcMA0GCSqG + SIb3DQEBAQUAA0sAMEgCQQCyRaXWpwgN6INQqws9p/BvPElJv2Rno9dVTFhlQqDA + aUJXe7MBmiO4NJcW76EozeBh5ztR3/4NE1FM2x8TisS3AgMBAAGjQjBAMA4GA1Ud + DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQtE1d49uSvpURf + OQ25Vlu6liY20DANBgkqhkiG9w0BAQsFAANBAAgLVaetJZcfOA3OIMMvQbz2Ydrt + uWF9BKkIad8jrcIrm3IkOtR8bKGmDIIaRKuG/ZUOL6NMe2fky3AAfKwleL4= + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBeDCCASKgAwIBAgIMFo+bQ+EuVthBfuZvMA0GCSqGSIb3DQEBCwUAMB0xGzAZ + BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMTA3MDUyMDExNDZaFw0zMTA3 + MDUyMDExNDZaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjBcMA0GCSqG + SIb3DQEBAQUAA0sAMEgCQQCxNbycDZNx5V1ZOiXxZSvaFpHRwKeHDfcuMUitdoPt + naVMlMTGDWAMuCVmFHFAWohIYynemEegmZkZ15S7AErfAgMBAAGjQjBAMA4GA1Ud + DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTAjQ8T4HclPIsC + qipEfUIcLP6jqTANBgkqhkiG9w0BAQsFAANBAJdZ17TN3HlWrH7HQgfR12UBwz8K + G9DurDznVaBVUYaHY8Sg5AvAXeb+yIF2JMmRR+bK+/G1QYY2D3/P31Ic2Oo= + -----END CERTIFICATE----- + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw + ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD + SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1 + jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG + MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA + MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8 + tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w== + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + 
MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw + OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD + SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7 + WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG + MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn + MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA + 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== + -----END CERTIFICATE----- +ClusterName: cilium-gce.example.com +ControlPlaneConfig: + KubeControllerManager: + allocateNodeCIDRs: false + attachDetachReconcileSyncPeriod: 1m0s + cloudProvider: external + clusterCIDR: 10.4.0.0/14 + clusterName: cilium-gce.example.com + configureCloudRoutes: false + image: registry.k8s.io/kube-controller-manager:v1.32.0 + leaderElection: + leaderElect: true + logLevel: 2 + useServiceAccountCredentials: true + KubeScheduler: + image: registry.k8s.io/kube-scheduler:v1.32.0 + leaderElection: + leaderElect: true + logLevel: 2 +DNSZone: "1" +EtcdClusterNames: +- main +- events +FileAssets: +- content: | + apiVersion: kubescheduler.config.k8s.io/v1 + clientConnection: + kubeconfig: /var/lib/kube-scheduler/kubeconfig + kind: KubeSchedulerConfiguration + path: /var/lib/kube-scheduler/config.yaml +Hooks: +- null +- null +KeypairIDs: + apiserver-aggregator-ca: "6980187172486667078076483355" + etcd-clients-ca: "6979622252718071085282986282" + etcd-manager-ca-events: "6982279354000777253151890266" + etcd-manager-ca-main: "6982279354000936168671127624" + etcd-peers-ca-events: "6982279353999767935825892873" + etcd-peers-ca-main: "6982279353998887468930183660" + kubernetes-ca: "6982820025135291416230495506" + service-account: "2" +KubeProxy: + clusterCIDR: 10.4.0.0/14 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.32.0 + logLevel: 2 +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: 
external + clusterDNS: 10.1.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + hairpinMode: promiscuous-bridge + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + nodeLabels: + kops.k8s.io/kops-controller-pki: "" + node-role.kubernetes.io/control-plane: "" + node.kubernetes.io/exclude-from-external-load-balancers: "" + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + taints: + - node-role.kubernetes.io/control-plane=:NoSchedule +KubernetesVersion: 1.32.0 +Networking: + gcp: + cilium: + agentPrometheusPort: 9090 + bpfCTGlobalAnyMax: 262144 + bpfCTGlobalTCPMax: 524288 + bpfLBAlgorithm: random + bpfLBMaglevTableSize: "16381" + bpfLBMapMax: 65536 + bpfNATGlobalMax: 524288 + bpfNeighGlobalMax: 524288 + bpfPolicyMapMax: 16384 + clusterName: default + cniExclusive: true + cpuRequest: 25m + disableCNPStatusUpdates: true + enableBPFMasquerade: false + enableEndpointHealthChecking: true + enableL7Proxy: true + enableLocalRedirectPolicy: false + enableRemoteNodeIdentity: true + enableUnreachableRoutes: false + gatewayAPI: + enabled: false + hubble: + enabled: false + identityAllocationMode: crd + identityChangeGracePeriod: 5s + ingress: + enabled: false + ipam: kubernetes + masquerade: true + memoryRequest: 128Mi + monitorAggregation: medium + sidecarIstioProxyImage: cilium/istio_proxy + toFQDNsDNSRejectResponseCode: refused + tunnel: vxlan + version: v1.18.6 + nonMasqueradeCIDR: 10.0.0.0/8 + serviceClusterIPRange: 10.1.0.0/16 +UpdatePolicy: automatic +channels: +- memfs://tests/cilium-gce.example.com/addons/bootstrap-channel.yaml +configStore: + keypairs: memfs://tests/cilium-gce.example.com/pki + secrets: memfs://tests/cilium-gce.example.com/secrets +containerdConfig: + logLevel: info + runc: + version: 
1.3.4 + sandboxImage: registry.k8s.io/pause:3.10.1 + version: 2.1.6 +etcdManifests: +- memfs://tests/cilium-gce.example.com/manifests/etcd/main-master-us-test1-a.yaml +- memfs://tests/cilium-gce.example.com/manifests/etcd/events-master-us-test1-a.yaml +multizone: true +nodeTags: cilium-gce-example-com-k8s-io-role-node +staticManifests: +- key: kube-apiserver-healthcheck + path: manifests/static/kube-apiserver-healthcheck.yaml +usesLegacyGossip: false +usesNoneDNS: false diff --git a/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_nodeupconfig-nodes_content b/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_nodeupconfig-nodes_content new file mode 100644 index 0000000000000..3131455f97685 --- /dev/null +++ b/tests/integration/update_cluster/cilium-gce/data/aws_s3_object_nodeupconfig-nodes_content @@ -0,0 +1,98 @@ +Assets: + amd64: + - 5ad4965598773d56a37a8e8429c3dc3d86b4c5c26d8417ab333ae345c053dae2@https://dl.k8s.io/release/v1.32.0/bin/linux/amd64/kubelet + - 646d58f6d98ee670a71d9cdffbf6625aeea2849d567f214bc43a35f8ccb7bf70@https://dl.k8s.io/release/v1.32.0/bin/linux/amd64/kubectl + - eca252d94176f8e08084433d08cd478c28cba7b773b49d691f1bec0f1e94e7d1@https://dl.k8s.io/release/v1.32.0/bin/linux/amd64/mounter + - b5852509ab5c950485794afd49a3fd1c3d6166db988c15b683d2373b7055300f@https://artifacts.k8s.io/binaries/cloud-provider-gcp/v35.0.0/auth-provider-gcp/linux/amd64/auth-provider-gcp + - 4793dc5c1f34ebf8402990d0050f3c294aa3c794cd5a4baa403c1cf10602326d@https://github.com/containerd/containerd/releases/download/v2.1.6/containerd-2.1.6-linux-amd64.tar.gz + - 5966ca40b6187b30e33bfc299c5f1fe72e8c1aa01cf3fefdadf391668f47f103@https://github.com/opencontainers/runc/releases/download/v1.3.4/runc.amd64 + arm64: + - bda9b2324c96693b38c41ecea051bab4c7c434be5683050b5e19025b50dbc0bf@https://dl.k8s.io/release/v1.32.0/bin/linux/arm64/kubelet + - 
ba4004f98f3d3a7b7d2954ff0a424caa2c2b06b78c17b1dccf2acc76a311a896@https://dl.k8s.io/release/v1.32.0/bin/linux/arm64/kubectl + - ee06cd4a0e8428a3cced77f4f7db836138c589e8e4bf46f0c676f8ff4b54b942@https://dl.k8s.io/release/v1.32.0/bin/linux/arm64/mounter + - 510aee27ef32a27b76e461e9084719365210d56166057b37db290a72dfecbffa@https://artifacts.k8s.io/binaries/cloud-provider-gcp/v35.0.0/auth-provider-gcp/linux/arm64/auth-provider-gcp + - 88d6e32348c36628c8500a630c6dd4b3cb8c680b1d18dc8d1d19041f67757c6e@https://github.com/containerd/containerd/releases/download/v2.1.6/containerd-2.1.6-linux-arm64.tar.gz + - d6dcab36d1b6af1b72c7f0662e5fcf446a291271ba6006532b95c4144e19d428@https://github.com/opencontainers/runc/releases/download/v1.3.4/runc.arm64 +CAs: {} +ClusterName: cilium-gce.example.com +Hooks: +- null +- null +KeypairIDs: + kubernetes-ca: "6982820025135291416230495506" +KubeProxy: + clusterCIDR: 10.4.0.0/14 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.32.0 + logLevel: 2 +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: external + clusterDNS: 10.1.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + hairpinMode: promiscuous-bridge + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + nodeLabels: + node-role.kubernetes.io/node: "" + podManifestPath: /etc/kubernetes/manifests + protectKernelDefaults: true + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +KubernetesVersion: 1.32.0 +Networking: + gcp: + cilium: + agentPrometheusPort: 9090 + bpfCTGlobalAnyMax: 262144 + bpfCTGlobalTCPMax: 524288 + bpfLBAlgorithm: random + bpfLBMaglevTableSize: "16381" + bpfLBMapMax: 65536 + bpfNATGlobalMax: 524288 + bpfNeighGlobalMax: 524288 + bpfPolicyMapMax: 16384 + clusterName: default + cniExclusive: true + cpuRequest: 25m + 
disableCNPStatusUpdates: true + enableBPFMasquerade: false + enableEndpointHealthChecking: true + enableL7Proxy: true + enableLocalRedirectPolicy: false + enableRemoteNodeIdentity: true + enableUnreachableRoutes: false + gatewayAPI: + enabled: false + hubble: + enabled: false + identityAllocationMode: crd + identityChangeGracePeriod: 5s + ingress: + enabled: false + ipam: kubernetes + masquerade: true + memoryRequest: 128Mi + monitorAggregation: medium + sidecarIstioProxyImage: cilium/istio_proxy + toFQDNsDNSRejectResponseCode: refused + tunnel: vxlan + version: v1.18.6 + nonMasqueradeCIDR: 10.0.0.0/8 + serviceClusterIPRange: 10.1.0.0/16 +UpdatePolicy: automatic +containerdConfig: + logLevel: info + runc: + version: 1.3.4 + sandboxImage: registry.k8s.io/pause:3.10.1 + version: 2.1.6 +multizone: true +nodeTags: cilium-gce-example-com-k8s-io-role-node +usesLegacyGossip: false +usesNoneDNS: false diff --git a/tests/integration/update_cluster/cilium-gce/data/google_compute_instance_template_master-us-test1-a-cilium-gce-example-com_metadata_user-data b/tests/integration/update_cluster/cilium-gce/data/google_compute_instance_template_master-us-test1-a-cilium-gce-example-com_metadata_user-data new file mode 100644 index 0000000000000..05f01b49f48e7 --- /dev/null +++ b/tests/integration/update_cluster/cilium-gce/data/google_compute_instance_template_master-us-test1-a-cilium-gce-example-com_metadata_user-data @@ -0,0 +1,133 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + +NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.34.0-beta.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.34.0-beta.1/nodeup-linux-amd64 +NODEUP_HASH_AMD64=c86e072f622b91546b7b3f3cb1a0f8a131e48b966ad018a0ac1520ceedf37725 +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.34.0-beta.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.34.0-beta.1/nodeup-linux-arm64 
+NODEUP_HASH_ARM64=64a9a9510538a449e85d05e13e3cd98b80377d68a673447c26821d40f00f0075 + + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + echo "== Downloading $1 with hash $2 from $3 ==" + local -r file="$1" + local -r hash="$2" + local -a urls + IFS=, read -r -a urls <<< "$3" + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo ${file} --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O ${file} --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo ${file} --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O ${file} --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "== Downloading ${url} using ${cmd} ==" + if ! (${cmd} "${url}"); then + echo "== Failed to download ${url} using ${cmd} ==" + continue + fi + if ! 
validate-hash "${file}" "${hash}"; then + echo "== Failed to validate hash for ${url} ==" + rm -f "${file}" + else + echo "== Downloaded ${url} with hash ${hash} ==" + return 0 + fi + done + done + + echo "== All downloads failed; sleeping before retrying ==" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum "${file}" | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== File ${file} is corrupted; hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "== Running nodeup ==" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "== Failed to initialize the machine ID; ensure machine-id configured ==" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: gce +ClusterName: cilium-gce.example.com +ConfigBase: memfs://tests/cilium-gce.example.com +InstanceGroupName: master-us-test1-a +InstanceGroupRole: ControlPlane +NodeupConfigHash: j4WQdUAKqZwYKjRTfHaXfTUyayCpq8+L4xu4TBxQlHI= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git 
a/tests/integration/update_cluster/cilium-gce/data/google_compute_instance_template_nodes-cilium-gce-example-com_metadata_user-data b/tests/integration/update_cluster/cilium-gce/data/google_compute_instance_template_nodes-cilium-gce-example-com_metadata_user-data new file mode 100644 index 0000000000000..a28dc6f24369e --- /dev/null +++ b/tests/integration/update_cluster/cilium-gce/data/google_compute_instance_template_nodes-cilium-gce-example-com_metadata_user-data @@ -0,0 +1,156 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + +NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.34.0-beta.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.34.0-beta.1/nodeup-linux-amd64 +NODEUP_HASH_AMD64=c86e072f622b91546b7b3f3cb1a0f8a131e48b966ad018a0ac1520ceedf37725 +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.34.0-beta.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.34.0-beta.1/nodeup-linux-arm64 +NODEUP_HASH_ARM64=64a9a9510538a449e85d05e13e3cd98b80377d68a673447c26821d40f00f0075 + + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + echo "== Downloading $1 with hash $2 from $3 ==" + local -r file="$1" + local -r hash="$2" + local -a urls + IFS=, read -r -a urls <<< "$3" + + if [[ -f "${file}" ]]; then + if ! 
validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo ${file} --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O ${file} --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo ${file} --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O ${file} --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "== Downloading ${url} using ${cmd} ==" + if ! (${cmd} "${url}"); then + echo "== Failed to download ${url} using ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Failed to validate hash for ${url} ==" + rm -f "${file}" + else + echo "== Downloaded ${url} with hash ${hash} ==" + return 0 + fi + done + done + + echo "== All downloads failed; sleeping before retrying ==" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum "${file}" | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== File ${file} is corrupted; hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "== Running nodeup ==" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + 
+/bin/systemd-machine-id-setup || echo "== Failed to initialize the machine ID; ensure machine-id configured ==" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: gce +ClusterName: cilium-gce.example.com +ConfigServer: + CACertificates: | + -----BEGIN CERTIFICATE----- + MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw + ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD + SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1 + jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG + MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA + MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8 + tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w== + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw + OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD + SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7 + WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG + MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn + MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA + 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== + -----END CERTIFICATE----- + servers: + - https://kops-controller.internal.cilium-gce.example.com:3988/ +InstanceGroupName: nodes +InstanceGroupRole: Node +NodeupConfigHash: g3aIwfEP+fDZCt2xVgfwXmi+S03durEbPcYuEn4ISWY= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/tests/integration/update_cluster/cilium-gce/id_rsa.pub b/tests/integration/update_cluster/cilium-gce/id_rsa.pub new file mode 100755 index 0000000000000..81cb0127830e7 --- /dev/null +++ b/tests/integration/update_cluster/cilium-gce/id_rsa.pub @@ 
-0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ== diff --git a/tests/integration/update_cluster/cilium-gce/in-v1alpha2.yaml b/tests/integration/update_cluster/cilium-gce/in-v1alpha2.yaml new file mode 100644 index 0000000000000..35ed1539ad449 --- /dev/null +++ b/tests/integration/update_cluster/cilium-gce/in-v1alpha2.yaml @@ -0,0 +1,95 @@ +apiVersion: kops.k8s.io/v1alpha2 +kind: Cluster +metadata: + creationTimestamp: "2017-01-01T00:00:00Z" + name: cilium-gce.example.com +spec: + api: + dns: {} + authorization: + rbac: {} + channel: stable + cloudProvider: gce + configBase: memfs://tests/cilium-gce.example.com + etcdClusters: + - cpuRequest: 200m + etcdMembers: + - instanceGroup: master-us-test1-a + name: a + memoryRequest: 100Mi + name: main + - cpuRequest: 100m + etcdMembers: + - instanceGroup: master-us-test1-a + name: a + memoryRequest: 100Mi + name: events + cloudConfig: + gceServiceAccount: default + gcpPDCSIDriver: + enabled: false + iam: + legacy: false + kubelet: + anonymousAuth: false + kubernetesApiAccess: + - 0.0.0.0/0 + - ::/0 + kubernetesVersion: v1.32.0 + masterPublicName: api.cilium-gce.example.com + networking: + gce: + cilium: {} + nonMasqueradeCIDR: 100.64.0.0/10 + project: testproject + sshAccess: + - 0.0.0.0/0 + - ::/0 + subnets: + - name: us-test1 + region: us-test1 + type: Public + topology: + dns: + type: Public + +--- +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2017-01-01T00:00:00Z" + labels: + kops.k8s.io/cluster: cilium-gce.example.com + name: master-us-test1-a +spec: + image: ubuntu-os-cloud/ubuntu-2004-focal-v20221018 + cloudLabels: + testCloudLabel: foobar + machineType: e2-medium + maxSize: 1 + minSize: 1 + role: Master + subnets: + - us-test1 + zones: + - us-test1-a + +--- +apiVersion: kops.k8s.io/v1alpha2 +kind: 
InstanceGroup +metadata: + creationTimestamp: "2017-01-01T00:00:00Z" + labels: + kops.k8s.io/cluster: cilium-gce.example.com + name: nodes +spec: + image: ubuntu-os-cloud/ubuntu-2004-focal-v20221018 + machineType: e2-medium + maxSize: 2 + minSize: 2 + role: Node + subnets: + - us-test1 + zones: + - us-test1-a + - us-test1-b diff --git a/tests/integration/update_cluster/cilium-gce/kubernetes.tf b/tests/integration/update_cluster/cilium-gce/kubernetes.tf new file mode 100644 index 0000000000000..939bad14032a6 --- /dev/null +++ b/tests/integration/update_cluster/cilium-gce/kubernetes.tf @@ -0,0 +1,680 @@ +locals { + cluster_name = "cilium-gce.example.com" + project = "testproject" + region = "us-test1" +} + +output "cluster_name" { + value = "cilium-gce.example.com" +} + +output "project" { + value = "testproject" +} + +output "region" { + value = "us-test1" +} + +provider "google" { + project = "testproject" + region = "us-test1" +} + +provider "aws" { + alias = "files" + region = "us-test-1" +} + +resource "aws_s3_object" "cilium-gce-example-com-addons-bootstrap" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_cilium-gce.example.com-addons-bootstrap_content") + key = "tests/cilium-gce.example.com/addons/bootstrap-channel.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "cilium-gce-example-com-addons-coredns-addons-k8s-io-k8s-1-12" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_cilium-gce.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content") + key = "tests/cilium-gce.example.com/addons/coredns.addons.k8s.io/k8s-1.12.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "cilium-gce-example-com-addons-dns-controller-addons-k8s-io-k8s-1-12" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_cilium-gce.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content") + key = 
"tests/cilium-gce.example.com/addons/dns-controller.addons.k8s.io/k8s-1.12.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "cilium-gce-example-com-addons-gcp-cloud-controller-addons-k8s-io-k8s-1-23" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_cilium-gce.example.com-addons-gcp-cloud-controller.addons.k8s.io-k8s-1.23_content") + key = "tests/cilium-gce.example.com/addons/gcp-cloud-controller.addons.k8s.io/k8s-1.23.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "cilium-gce-example-com-addons-kops-controller-addons-k8s-io-k8s-1-16" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_cilium-gce.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content") + key = "tests/cilium-gce.example.com/addons/kops-controller.addons.k8s.io/k8s-1.16.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "cilium-gce-example-com-addons-kubelet-api-rbac-addons-k8s-io-k8s-1-9" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_cilium-gce.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content") + key = "tests/cilium-gce.example.com/addons/kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "cilium-gce-example-com-addons-limit-range-addons-k8s-io" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_cilium-gce.example.com-addons-limit-range.addons.k8s.io_content") + key = "tests/cilium-gce.example.com/addons/limit-range.addons.k8s.io/v1.5.0.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "cilium-gce-example-com-addons-networking-cilium-io-k8s-1-16" { + bucket = "testingBucket" + content = 
file("${path.module}/data/aws_s3_object_cilium-gce.example.com-addons-networking.cilium.io-k8s-1.16_content") + key = "tests/cilium-gce.example.com/addons/networking.cilium.io/k8s-1.16-v1.15.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "cilium-gce-example-com-addons-storage-gce-addons-k8s-io-v1-7-0" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_cilium-gce.example.com-addons-storage-gce.addons.k8s.io-v1.7.0_content") + key = "tests/cilium-gce.example.com/addons/storage-gce.addons.k8s.io/v1.7.0.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "cluster-completed-spec" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_cluster-completed.spec_content") + key = "tests/cilium-gce.example.com/cluster-completed.spec" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "etcd-cluster-spec-events" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_etcd-cluster-spec-events_content") + key = "tests/cilium-gce.example.com/backups/etcd/events/control/etcd-cluster-spec" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "etcd-cluster-spec-main" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_etcd-cluster-spec-main_content") + key = "tests/cilium-gce.example.com/backups/etcd/main/control/etcd-cluster-spec" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "kops-version-txt" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_kops-version.txt_content") + key = "tests/cilium-gce.example.com/kops-version.txt" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "manifests-etcdmanager-events-master-us-test1-a" { + bucket = "testingBucket" + content = 
file("${path.module}/data/aws_s3_object_manifests-etcdmanager-events-master-us-test1-a_content") + key = "tests/cilium-gce.example.com/manifests/etcd/events-master-us-test1-a.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "manifests-etcdmanager-main-master-us-test1-a" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_manifests-etcdmanager-main-master-us-test1-a_content") + key = "tests/cilium-gce.example.com/manifests/etcd/main-master-us-test1-a.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "manifests-static-kube-apiserver-healthcheck" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content") + key = "tests/cilium-gce.example.com/manifests/static/kube-apiserver-healthcheck.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-master-us-test1-a" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-master-us-test1-a_content") + key = "tests/cilium-gce.example.com/igconfig/control-plane/master-us-test1-a/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-nodes" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-nodes_content") + key = "tests/cilium-gce.example.com/igconfig/node/nodes/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "google_compute_disk" "a-etcd-events-cilium-gce-example-com" { + labels = { + "k8s-io-cluster-name" = "cilium-gce-example-com" + "k8s-io-etcd-events" = "a-2fa" + "k8s-io-role-master" = "master" + } + name = "a-etcd-events-cilium-gce-example-com" + size = 20 + type = "pd-ssd" + zone = "us-test1-a" +} + +resource "google_compute_disk" "a-etcd-main-cilium-gce-example-com" { + labels = { + 
"k8s-io-cluster-name" = "cilium-gce-example-com" + "k8s-io-etcd-main" = "a-2fa" + "k8s-io-role-master" = "master" + } + name = "a-etcd-main-cilium-gce-example-com" + size = 20 + type = "pd-ssd" + zone = "us-test1-a" +} + +resource "google_compute_firewall" "kubernetes-master-https-cilium-gce-example-com" { + allow { + ports = ["443"] + protocol = "tcp" + } + disabled = false + name = "kubernetes-master-https-cilium-gce-example-com" + network = google_compute_network.cilium-gce-example-com.name + source_ranges = ["0.0.0.0/0"] + target_tags = ["cilium-gce-example-com-k8s-io-role-control-plane", "cilium-gce-example-com-k8s-io-role-master"] +} + +resource "google_compute_firewall" "kubernetes-master-https-ipv6-cilium-gce-example-com" { + allow { + ports = ["443"] + protocol = "tcp" + } + disabled = false + name = "kubernetes-master-https-ipv6-cilium-gce-example-com" + network = google_compute_network.cilium-gce-example-com.name + source_ranges = ["::/0"] + target_tags = ["cilium-gce-example-com-k8s-io-role-control-plane", "cilium-gce-example-com-k8s-io-role-master"] +} + +resource "google_compute_firewall" "master-to-master-cilium-gce-example-com" { + allow { + protocol = "tcp" + } + allow { + protocol = "udp" + } + allow { + protocol = "icmp" + } + allow { + protocol = "esp" + } + allow { + protocol = "ah" + } + allow { + protocol = "sctp" + } + disabled = false + name = "master-to-master-cilium-gce-example-com" + network = google_compute_network.cilium-gce-example-com.name + source_tags = ["cilium-gce-example-com-k8s-io-role-control-plane", "cilium-gce-example-com-k8s-io-role-master"] + target_tags = ["cilium-gce-example-com-k8s-io-role-control-plane", "cilium-gce-example-com-k8s-io-role-master"] +} + +resource "google_compute_firewall" "master-to-node-cilium-gce-example-com" { + allow { + protocol = "tcp" + } + allow { + protocol = "udp" + } + allow { + protocol = "icmp" + } + allow { + protocol = "esp" + } + allow { + protocol = "ah" + } + allow { + protocol = 
"sctp" + } + disabled = false + name = "master-to-node-cilium-gce-example-com" + network = google_compute_network.cilium-gce-example-com.name + source_tags = ["cilium-gce-example-com-k8s-io-role-control-plane", "cilium-gce-example-com-k8s-io-role-master"] + target_tags = ["cilium-gce-example-com-k8s-io-role-node"] +} + +resource "google_compute_firewall" "node-to-master-cilium-gce-example-com" { + allow { + ports = ["443"] + protocol = "tcp" + } + allow { + ports = ["10250"] + protocol = "tcp" + } + allow { + ports = ["3988"] + protocol = "tcp" + } + allow { + ports = ["8472"] + protocol = "udp" + } + allow { + ports = ["10257"] + protocol = "tcp" + } + allow { + ports = ["10259"] + protocol = "tcp" + } + allow { + ports = ["10249"] + protocol = "tcp" + } + allow { + ports = ["2382"] + protocol = "tcp" + } + disabled = false + name = "node-to-master-cilium-gce-example-com" + network = google_compute_network.cilium-gce-example-com.name + source_tags = ["cilium-gce-example-com-k8s-io-role-node"] + target_tags = ["cilium-gce-example-com-k8s-io-role-control-plane", "cilium-gce-example-com-k8s-io-role-master"] +} + +resource "google_compute_firewall" "node-to-node-cilium-gce-example-com" { + allow { + protocol = "tcp" + } + allow { + protocol = "udp" + } + allow { + protocol = "icmp" + } + allow { + protocol = "esp" + } + allow { + protocol = "ah" + } + allow { + protocol = "sctp" + } + disabled = false + name = "node-to-node-cilium-gce-example-com" + network = google_compute_network.cilium-gce-example-com.name + source_tags = ["cilium-gce-example-com-k8s-io-role-node"] + target_tags = ["cilium-gce-example-com-k8s-io-role-node"] +} + +resource "google_compute_firewall" "nodeport-external-to-node-cilium-gce-example-com" { + allow { + ports = ["30000-32767"] + protocol = "tcp" + } + allow { + ports = ["30000-32767"] + protocol = "udp" + } + disabled = true + name = "nodeport-external-to-node-cilium-gce-example-com" + network = 
google_compute_network.cilium-gce-example-com.name + source_ranges = ["0.0.0.0/0"] + target_tags = ["cilium-gce-example-com-k8s-io-role-node"] +} + +resource "google_compute_firewall" "nodeport-external-to-node-ipv6-cilium-gce-example-com" { + allow { + ports = ["30000-32767"] + protocol = "tcp" + } + allow { + ports = ["30000-32767"] + protocol = "udp" + } + disabled = true + name = "nodeport-external-to-node-ipv6-cilium-gce-example-com" + network = google_compute_network.cilium-gce-example-com.name + source_ranges = ["::/0"] + target_tags = ["cilium-gce-example-com-k8s-io-role-node"] +} + +resource "google_compute_firewall" "pod-cidrs-to-https-api-cilium-gce-example-com" { + allow { + ports = ["443"] + protocol = "tcp" + } + disabled = false + name = "pod-cidrs-to-https-api-cilium-gce-example-com" + network = google_compute_network.cilium-gce-example-com.name + source_ranges = ["10.4.0.0/14"] + target_tags = ["cilium-gce-example-com-k8s-io-role-control-plane"] +} + +resource "google_compute_firewall" "pod-cidrs-to-node-cilium-gce-example-com" { + allow { + protocol = "tcp" + } + allow { + protocol = "udp" + } + allow { + protocol = "icmp" + } + allow { + protocol = "esp" + } + allow { + protocol = "ah" + } + allow { + protocol = "sctp" + } + disabled = false + name = "pod-cidrs-to-node-cilium-gce-example-com" + network = google_compute_network.cilium-gce-example-com.name + source_ranges = ["10.4.0.0/14"] + target_tags = ["cilium-gce-example-com-k8s-io-role-node"] +} + +resource "google_compute_firewall" "pod-cidrs-to-node-ipv6-cilium-gce-example-com" { + allow { + protocol = "tcp" + } + allow { + protocol = "udp" + } + allow { + protocol = "58" + } + allow { + protocol = "esp" + } + allow { + protocol = "ah" + } + allow { + protocol = "sctp" + } + disabled = true + name = "pod-cidrs-to-node-ipv6-cilium-gce-example-com" + network = google_compute_network.cilium-gce-example-com.name + source_ranges = ["::/0"] + target_tags = 
["cilium-gce-example-com-k8s-io-role-node"] +} + +resource "google_compute_firewall" "ssh-external-to-master-cilium-gce-example-com" { + allow { + ports = ["22"] + protocol = "tcp" + } + disabled = false + name = "ssh-external-to-master-cilium-gce-example-com" + network = google_compute_network.cilium-gce-example-com.name + source_ranges = ["0.0.0.0/0"] + target_tags = ["cilium-gce-example-com-k8s-io-role-control-plane", "cilium-gce-example-com-k8s-io-role-master"] +} + +resource "google_compute_firewall" "ssh-external-to-master-ipv6-cilium-gce-example-com" { + allow { + ports = ["22"] + protocol = "tcp" + } + disabled = false + name = "ssh-external-to-master-ipv6-cilium-gce-example-com" + network = google_compute_network.cilium-gce-example-com.name + source_ranges = ["::/0"] + target_tags = ["cilium-gce-example-com-k8s-io-role-control-plane", "cilium-gce-example-com-k8s-io-role-master"] +} + +resource "google_compute_firewall" "ssh-external-to-node-cilium-gce-example-com" { + allow { + ports = ["22"] + protocol = "tcp" + } + disabled = false + name = "ssh-external-to-node-cilium-gce-example-com" + network = google_compute_network.cilium-gce-example-com.name + source_ranges = ["0.0.0.0/0"] + target_tags = ["cilium-gce-example-com-k8s-io-role-node"] +} + +resource "google_compute_firewall" "ssh-external-to-node-ipv6-cilium-gce-example-com" { + allow { + ports = ["22"] + protocol = "tcp" + } + disabled = false + name = "ssh-external-to-node-ipv6-cilium-gce-example-com" + network = google_compute_network.cilium-gce-example-com.name + source_ranges = ["::/0"] + target_tags = ["cilium-gce-example-com-k8s-io-role-node"] +} + +resource "google_compute_instance_group_manager" "a-master-us-test1-a-cilium-gce-example-com" { + base_instance_name = "master-us-test1-a" + lifecycle { + ignore_changes = [target_size] + } + list_managed_instances_results = "PAGINATED" + name = "a-master-us-test1-a-cilium-gce-example-com" + target_size = 1 + update_policy { + minimal_action = 
"REPLACE" + type = "OPPORTUNISTIC" + } + version { + instance_template = google_compute_instance_template.master-us-test1-a-cilium-gce-example-com.self_link + } + zone = "us-test1-a" +} + +resource "google_compute_instance_group_manager" "a-nodes-cilium-gce-example-com" { + base_instance_name = "nodes" + lifecycle { + ignore_changes = [target_size] + } + list_managed_instances_results = "PAGINATED" + name = "a-nodes-cilium-gce-example-com" + target_size = 1 + update_policy { + minimal_action = "REPLACE" + type = "OPPORTUNISTIC" + } + version { + instance_template = google_compute_instance_template.nodes-cilium-gce-example-com.self_link + } + zone = "us-test1-a" +} + +resource "google_compute_instance_group_manager" "b-nodes-cilium-gce-example-com" { + base_instance_name = "nodes" + lifecycle { + ignore_changes = [target_size] + } + list_managed_instances_results = "PAGINATED" + name = "b-nodes-cilium-gce-example-com" + target_size = 1 + update_policy { + minimal_action = "REPLACE" + type = "OPPORTUNISTIC" + } + version { + instance_template = google_compute_instance_template.nodes-cilium-gce-example-com.self_link + } + zone = "us-test1-b" +} + +resource "google_compute_instance_template" "master-us-test1-a-cilium-gce-example-com" { + can_ip_forward = false + disk { + auto_delete = true + boot = true + device_name = "persistent-disks-0" + disk_name = "" + disk_size_gb = 64 + disk_type = "pd-standard" + interface = "" + mode = "READ_WRITE" + provisioned_iops = 0 + provisioned_throughput = 0 + source = "" + source_image = "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-2004-focal-v20221018" + type = "PERSISTENT" + } + labels = { + "k8s-io-cluster-name" = "cilium-gce-example-com" + "k8s-io-instance-group" = "master-us-test1-a" + "k8s-io-role-control-plane" = "control-plane" + "k8s-io-role-master" = "master" + "testCloudLabel" = "foobar" + } + lifecycle { + create_before_destroy = true + } + machine_type = "e2-medium" + metadata = { 
+ "cluster-name" = "cilium-gce.example.com" + "kops-k8s-io-instance-group-name" = "master-us-test1-a" + "ssh-keys" = "admin: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==" + "user-data" = file("${path.module}/data/google_compute_instance_template_master-us-test1-a-cilium-gce-example-com_metadata_user-data") + } + name_prefix = "master-us-test1-a-cilium--pkesht-" + network_interface { + access_config { + } + network = google_compute_network.cilium-gce-example-com.name + stack_type = "IPV4_ONLY" + subnetwork = google_compute_subnetwork.us-test1-cilium-gce-example-com.name + } + scheduling { + automatic_restart = true + on_host_maintenance = "MIGRATE" + preemptible = false + provisioning_model = "STANDARD" + } + service_account { + email = "default" + scopes = ["https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/monitoring", "https://www.googleapis.com/auth/logging.write", "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/devstorage.read_write", "https://www.googleapis.com/auth/ndev.clouddns.readwrite"] + } + tags = ["cilium-gce-example-com-k8s-io-role-control-plane", "cilium-gce-example-com-k8s-io-role-master"] +} + +resource "google_compute_instance_template" "nodes-cilium-gce-example-com" { + can_ip_forward = false + disk { + auto_delete = true + boot = true + device_name = "persistent-disks-0" + disk_name = "" + disk_size_gb = 128 + disk_type = "pd-standard" + interface = "" + mode = "READ_WRITE" + provisioned_iops = 0 + provisioned_throughput = 0 + source = "" + source_image = "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-2004-focal-v20221018" + type = "PERSISTENT" + } + labels = { + "k8s-io-cluster-name" = "cilium-gce-example-com" + "k8s-io-instance-group" = "nodes" + "k8s-io-role-node" = "node" + } + 
lifecycle { + create_before_destroy = true + } + machine_type = "e2-medium" + metadata = { + "cluster-name" = "cilium-gce.example.com" + "kops-k8s-io-instance-group-name" = "nodes" + "kube-env" = "AUTOSCALER_ENV_VARS: os_distribution=ubuntu;arch=amd64;os=linux" + "ssh-keys" = "admin: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==" + "user-data" = file("${path.module}/data/google_compute_instance_template_nodes-cilium-gce-example-com_metadata_user-data") + } + name_prefix = "nodes-cilium-gce-example-com-" + network_interface { + access_config { + } + network = google_compute_network.cilium-gce-example-com.name + stack_type = "IPV4_ONLY" + subnetwork = google_compute_subnetwork.us-test1-cilium-gce-example-com.name + } + scheduling { + automatic_restart = true + on_host_maintenance = "MIGRATE" + preemptible = false + provisioning_model = "STANDARD" + } + service_account { + email = "default" + scopes = ["https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/monitoring", "https://www.googleapis.com/auth/logging.write", "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/devstorage.read_only"] + } + tags = ["cilium-gce-example-com-k8s-io-role-node"] +} + +resource "google_compute_network" "cilium-gce-example-com" { + auto_create_subnetworks = false + name = "cilium-gce-example-com" +} + +resource "google_compute_subnetwork" "us-test1-cilium-gce-example-com" { + ip_cidr_range = "10.0.32.0/19" + name = "us-test1-cilium-gce-example-com" + network = google_compute_network.cilium-gce-example-com.name + region = "us-test1" + secondary_ip_range { + ip_cidr_range = "10.4.0.0/14" + range_name = "pods-cilium-gce-example-com" + } + secondary_ip_range { + ip_cidr_range = "10.1.0.0/16" + range_name = "services-cilium-gce-example-com" + } + stack_type = "IPV4_ONLY" +} + 
+terraform { + required_version = ">= 0.15.0" + required_providers { + aws = { + "configuration_aliases" = [aws.files] + "source" = "hashicorp/aws" + "version" = ">= 5.0.0" + } + google = { + "source" = "hashicorp/google" + "version" = ">= 5.11.0" + } + } +} diff --git a/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.15.yaml.template b/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.15.yaml.template index 1eb11bf897f79..de3fdadf69ee8 100644 --- a/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.15.yaml.template +++ b/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.16-v1.15.yaml.template @@ -1,4 +1,8 @@ -{{ with .Networking.Cilium }} +{{ $cilium := .Networking.Cilium }} +{{ if and .Networking.GCP .Networking.GCP.Cilium }} +{{ $cilium = .Networking.GCP.Cilium }} +{{ end }} +{{ with $cilium }} --- {{ if CiliumSecret }} --- diff --git a/upup/pkg/fi/cloudup/apply_cluster.go b/upup/pkg/fi/cloudup/apply_cluster.go index 75500ade4c9a3..14189e4b80f49 100644 --- a/upup/pkg/fi/cloudup/apply_cluster.go +++ b/upup/pkg/fi/cloudup/apply_cluster.go @@ -377,6 +377,9 @@ func (c *ApplyClusterCmd) Run(ctx context.Context) (*ApplyResults, error) { } ciliumSpec := c.Cluster.Spec.Networking.Cilium + if ciliumSpec == nil && c.Cluster.Spec.Networking.GCP != nil { + ciliumSpec = c.Cluster.Spec.Networking.GCP.Cilium + } if ciliumSpec != nil && ciliumSpec.EnableEncryption && ciliumSpec.EncryptionType == kops.CiliumEncryptionTypeIPSec { secret, err := secretStore.FindSecret("ciliumpassword") if err != nil { diff --git a/upup/pkg/fi/cloudup/bootstrapchannelbuilder/cilium.go b/upup/pkg/fi/cloudup/bootstrapchannelbuilder/cilium.go index da9bdca91e5af..47cac5997f26f 100644 --- a/upup/pkg/fi/cloudup/bootstrapchannelbuilder/cilium.go +++ b/upup/pkg/fi/cloudup/bootstrapchannelbuilder/cilium.go @@ -24,6 +24,9 @@ import ( func addCiliumAddon(b *BootstrapChannelBuilder, addons *AddonList) error { cilium := 
b.Cluster.Spec.Networking.Cilium + if cilium == nil && b.Cluster.Spec.Networking.GCP != nil { + cilium = b.Cluster.Spec.Networking.GCP.Cilium + } if cilium == nil { return nil } diff --git a/upup/pkg/fi/cloudup/bootstrapchannelbuilder_test.go b/upup/pkg/fi/cloudup/bootstrapchannelbuilder_test.go index 718fb1909b916..4a5080034acac 100644 --- a/upup/pkg/fi/cloudup/bootstrapchannelbuilder_test.go +++ b/upup/pkg/fi/cloudup/bootstrapchannelbuilder_test.go @@ -44,10 +44,12 @@ func TestBootstrapChannelBuilder_BuildTasks(t *testing.T) { defer h.Close() h.SetupMockAWS() + h.SetupMockGCE() runChannelBuilderTest(t, "simple", []string{"kops-controller.addons.k8s.io-k8s-1.16"}) // Use cilium networking, proxy runChannelBuilderTest(t, "cilium", []string{"kops-controller.addons.k8s.io-k8s-1.16"}) + runChannelBuilderTest(t, "gcp-with-cilium", []string{"kops-controller.addons.k8s.io-k8s-1.16"}) runChannelBuilderTest(t, "amazonvpc", []string{"networking.amazon-vpc-routed-eni-k8s-1.16"}) runChannelBuilderTest(t, "amazonvpc-containerd", []string{"networking.amazon-vpc-routed-eni-k8s-1.16"}) runChannelBuilderTest(t, "awsiamauthenticator/crd", []string{"authentication.aws-k8s-1.12"}) diff --git a/upup/pkg/fi/cloudup/gcetasks/subnet.go b/upup/pkg/fi/cloudup/gcetasks/subnet.go index ff74c64b017f3..3881bfc39bef6 100644 --- a/upup/pkg/fi/cloudup/gcetasks/subnet.go +++ b/upup/pkg/fi/cloudup/gcetasks/subnet.go @@ -19,6 +19,7 @@ package gcetasks import ( "fmt" "reflect" + "sort" compute "google.golang.org/api/compute/v1" "k8s.io/klog/v2" @@ -300,6 +301,12 @@ type terraformSubnetRange struct { CIDR string `cty:"ip_cidr_range"` } +type ByName []terraformSubnetRange + +func (a ByName) Len() int { return len(a) } +func (a ByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a ByName) Less(i, j int) bool { return a[i].Name < a[j].Name } + func (_ *Subnet) RenderSubnet(t *terraform.TerraformTarget, a, e, changes *Subnet) error { shared := fi.ValueOf(e.Shared) if shared { @@ -316,13 +323,18 @@ 
func (_ *Subnet) RenderSubnet(t *terraform.TerraformTarget, a, e, changes *Subne Ipv6AccessType: e.Ipv6AccessType, } + sortedRanges := make([]terraformSubnetRange, 0, len(e.SecondaryIpRanges)) + for k, v := range e.SecondaryIpRanges { - tf.SecondaryIPRange = append(tf.SecondaryIPRange, terraformSubnetRange{ + sortedRanges = append(sortedRanges, terraformSubnetRange{ Name: k, CIDR: v, }) } + sort.Sort(ByName(sortedRanges)) + tf.SecondaryIPRange = sortedRanges + return t.RenderResource("google_compute_subnetwork", *e.Name, tf) } diff --git a/upup/pkg/fi/cloudup/new_cluster.go b/upup/pkg/fi/cloudup/new_cluster.go index 506d87579cf4a..8f58c8a4a0baa 100644 --- a/upup/pkg/fi/cloudup/new_cluster.go +++ b/upup/pkg/fi/cloudup/new_cluster.go @@ -1275,6 +1275,16 @@ func setupNetworking(opt *NewClusterOptions, cluster *api.Cluster) error { cluster.Spec.Networking.Cilium.IPAM = "eni" case "gcp", "gce": cluster.Spec.Networking.GCP = &api.GCPNetworkingSpec{} + case "gcp-with-cilium": + //TODO: move into own function + cluster.Spec.Networking.GCP = &api.GCPNetworkingSpec{} + cluster.Spec.Networking.GCP.Cilium = &api.CiliumNetworkingSpec{} + cluster.Spec.Networking.GCP.Cilium.EnableNodePort = true + if cluster.Spec.KubeProxy == nil { + cluster.Spec.KubeProxy = &api.KubeProxyConfig{} + } + enabled := false + cluster.Spec.KubeProxy.Enabled = &enabled case "kindnet": cluster.Spec.Networking.Kindnet = &api.KindnetNetworkingSpec{} default: diff --git a/upup/pkg/fi/cloudup/template_functions.go b/upup/pkg/fi/cloudup/template_functions.go index d19e5c234fbf8..f02edd2668a70 100644 --- a/upup/pkg/fi/cloudup/template_functions.go +++ b/upup/pkg/fi/cloudup/template_functions.go @@ -315,7 +315,7 @@ func (tf *TemplateFunctions) AddTo(dest template.FuncMap, secretStore fi.SecretS } } - if cluster.Spec.Networking.Cilium != nil { + if cluster.Spec.Networking.Cilium != nil || cluster.Spec.Networking.NetworkingIsGCPCilium() { ciliumsecretString := "" ciliumsecret, _ := 
secretStore.Secret("ciliumpassword") if ciliumsecret != nil { diff --git a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/gcp-with-cilium/cluster.yaml b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/gcp-with-cilium/cluster.yaml new file mode 100644 index 0000000000000..ab5a095755c5e --- /dev/null +++ b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/gcp-with-cilium/cluster.yaml @@ -0,0 +1,62 @@ +apiVersion: kops.k8s.io/v1alpha2 +kind: Cluster +metadata: + creationTimestamp: "2026-03-23T22:11:00Z" + name: minimal.example.com +spec: + api: + loadBalancer: + type: Public + authorization: + rbac: {} + channel: stable + cloudConfig: {} + cloudProvider: gce + configBase: gs://clusters.example.com/minimal.example.com + etcdClusters: + - cpuRequest: 200m + etcdMembers: + - instanceGroup: control-plane-us-test1-a + name: a + manager: + backupRetentionDays: 90 + memoryRequest: 100Mi + name: main + - cpuRequest: 100m + etcdMembers: + - instanceGroup: control-plane-us-test1-a + name: a + manager: + backupRetentionDays: 90 + memoryRequest: 100Mi + name: events + iam: + allowContainerRegistry: true + legacy: false + kubelet: + anonymousAuth: false + kubernetesApiAccess: + - 0.0.0.0/0 + - ::/0 + kubernetesVersion: 1.35.2 + networking: + gce: + cilium: + hubble: {} + ingress: {} + gatewayAPI: {} + nonMasqueradeCIDR: 10.0.0.0/8 + podCIDR: 10.4.0.0/14 + project: testproject + serviceClusterIPRange: 10.1.0.0/16 + sshAccess: + - 0.0.0.0/0 + - ::/0 + subnets: + - cidr: 10.0.32.0/19 + name: us-test1 + region: us-test1 + type: Public + topology: + dns: + type: None \ No newline at end of file diff --git a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/gcp-with-cilium/kops-controller.addons.k8s.io-k8s-1.16.yaml b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/gcp-with-cilium/kops-controller.addons.k8s.io-k8s-1.16.yaml new file mode 100644 index 0000000000000..10fd439ce84fc --- /dev/null +++ 
b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/gcp-with-cilium/kops-controller.addons.k8s.io-k8s-1.16.yaml @@ -0,0 +1,217 @@ +apiVersion: v1 +data: + config.yaml: | + {"clusterName":"minimal.example.com","cloud":"gce","configBase":"gs://clusters.example.com/minimal.example.com","secretStore":"gs://clusters.example.com/minimal.example.com/secrets","server":{"Listen":":3988","provider":{"gce":{"projectID":"testproject","region":"us-east-1","clusterName":"minimal.example.com","MaxTimeSkew":300}},"serverKeyPath":"/etc/kubernetes/kops-controller/pki/kops-controller.key","serverCertificatePath":"/etc/kubernetes/kops-controller/pki/kops-controller.crt","caBasePath":"/etc/kubernetes/kops-controller/pki","signingCAs":["kubernetes-ca"],"certNames":["kubelet","kubelet-server","kube-proxy"]}} +kind: ConfigMap +metadata: + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + k8s-app: kops-controller + version: v1.35.0-beta.1 + name: kops-controller + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kops-controller + template: + metadata: + annotations: + dns.alpha.kubernetes.io/internal: kops-controller.internal.minimal.example.com + labels: + k8s-addon: kops-controller.addons.k8s.io + k8s-app: kops-controller + kops.k8s.io/managed-by: kops + version: v1.35.0-beta.1 + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - key: kops.k8s.io/kops-controller-pki + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + - key: 
kops.k8s.io/kops-controller-pki + operator: Exists + containers: + - args: + - --v=2 + - --conf=/etc/kubernetes/kops-controller/config/config.yaml + command: null + env: + - name: KUBERNETES_SERVICE_HOST + value: 127.0.0.1 + image: registry.k8s.io/kops/kops-controller:1.35.0-beta.1 + name: kops-controller + resources: + requests: + cpu: 50m + memory: 50Mi + securityContext: + runAsNonRoot: true + runAsUser: 10011 + volumeMounts: + - mountPath: /etc/kubernetes/kops-controller/config/ + name: kops-controller-config + - mountPath: /etc/kubernetes/kops-controller/pki/ + name: kops-controller-pki + dnsPolicy: Default + hostNetwork: true + nodeSelector: null + priorityClassName: system-cluster-critical + serviceAccount: kops-controller + tolerations: + - key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - key: node.kubernetes.io/not-ready + operator: Exists + - key: node-role.kubernetes.io/master + operator: Exists + - key: node-role.kubernetes.io/control-plane + operator: Exists + volumes: + - configMap: + name: kops-controller + name: kops-controller-config + - hostPath: + path: /etc/kubernetes/kops-controller/ + type: Directory + name: kops-controller-pki + updateStrategy: + type: OnDelete + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - patch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + 
app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kops-controller +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:serviceaccount:kube-system:kops-controller + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + - create +- apiGroups: + - "" + - coordination.k8s.io + resourceNames: + - kops-controller-leader + resources: + - configmaps + - leases + verbs: + - get + - list + - watch + - patch + - update + - delete +- apiGroups: + - "" + - coordination.k8s.io + resources: + - configmaps + - leases + verbs: + - create + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kops-controller +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:serviceaccount:kube-system:kops-controller diff --git a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/gcp-with-cilium/manifest.yaml b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/gcp-with-cilium/manifest.yaml new file mode 100644 index 0000000000000..303d8465ea671 --- /dev/null +++ b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/gcp-with-cilium/manifest.yaml @@ -0,0 +1,109 @@ +kind: Addons +metadata: + name: bootstrap +spec: + addons: + - id: k8s-1.16 + manifest: kops-controller.addons.k8s.io/k8s-1.16.yaml + manifestHash: 
afa3a60e52b49207ba48b2e8bea21885bdef104049591ba90da794a09caba8ad + name: kops-controller.addons.k8s.io + needsRollingUpdate: control-plane + selector: + k8s-addon: kops-controller.addons.k8s.io + version: 9.99.0 + - id: k8s-1.12 + manifest: coredns.addons.k8s.io/k8s-1.12.yaml + manifestHash: ee75b46d6035e1d0798bb98b982729d95ce94ccf1d94c6f745017fbde1cb3f91 + name: coredns.addons.k8s.io + selector: + k8s-addon: coredns.addons.k8s.io + version: 9.99.0 + - id: k8s-1.9 + manifest: kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml + manifestHash: da91eb5cf9a29f1b03510007d6d54603aef2fc23a305abc9ba496c510dfd3bc7 + name: kubelet-api.rbac.addons.k8s.io + selector: + k8s-addon: kubelet-api.rbac.addons.k8s.io + version: 9.99.0 + - manifest: limit-range.addons.k8s.io/v1.5.0.yaml + manifestHash: 686cc69e559a1c6f5e8b94e38de54a575a25c432ed5ceec565244b965fb5f07f + name: limit-range.addons.k8s.io + selector: + k8s-addon: limit-range.addons.k8s.io + version: 9.99.0 + - id: v1.7.0 + manifest: storage-gce.addons.k8s.io/v1.7.0.yaml + manifestHash: 4f73c7d683f04e61d60e90053a6db1fd82272dc0c13790390740f5e207181c77 + name: storage-gce.addons.k8s.io + selector: + k8s-addon: storage-gce.addons.k8s.io + version: 9.99.0 + - id: k8s-1.23 + manifest: gcp-pd-csi-driver.addons.k8s.io/k8s-1.23.yaml + manifestHash: 6feff18c6ec4651da2eccc5a8ed76408cf1fcd78d76dc2f3ffdce3d8c983df09 + name: gcp-pd-csi-driver.addons.k8s.io + selector: + k8s-addon: gcp-pd-csi-driver.addons.k8s.io + version: 9.99.0 + - id: k8s-1.23 + manifest: gcp-cloud-controller.addons.k8s.io/k8s-1.23.yaml + manifestHash: 81a6bb5e4ae011d97d2f35d9a776b025eef202e37a354373ad9e7e92259cac0e + name: gcp-cloud-controller.addons.k8s.io + prune: + kinds: + - kind: ConfigMap + labelSelector: addon.kops.k8s.io/name=gcp-cloud-controller.addons.k8s.io,app.kubernetes.io/managed-by=kops + - kind: Service + labelSelector: addon.kops.k8s.io/name=gcp-cloud-controller.addons.k8s.io,app.kubernetes.io/managed-by=kops + - kind: ServiceAccount + labelSelector: 
addon.kops.k8s.io/name=gcp-cloud-controller.addons.k8s.io,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system + - group: admissionregistration.k8s.io + kind: MutatingWebhookConfiguration + labelSelector: addon.kops.k8s.io/name=gcp-cloud-controller.addons.k8s.io,app.kubernetes.io/managed-by=kops + - group: admissionregistration.k8s.io + kind: ValidatingWebhookConfiguration + labelSelector: addon.kops.k8s.io/name=gcp-cloud-controller.addons.k8s.io,app.kubernetes.io/managed-by=kops + - group: apps + kind: DaemonSet + labelSelector: addon.kops.k8s.io/name=gcp-cloud-controller.addons.k8s.io,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system + - group: apps + kind: Deployment + labelSelector: addon.kops.k8s.io/name=gcp-cloud-controller.addons.k8s.io,app.kubernetes.io/managed-by=kops + - group: apps + kind: StatefulSet + labelSelector: addon.kops.k8s.io/name=gcp-cloud-controller.addons.k8s.io,app.kubernetes.io/managed-by=kops + - group: policy + kind: PodDisruptionBudget + labelSelector: addon.kops.k8s.io/name=gcp-cloud-controller.addons.k8s.io,app.kubernetes.io/managed-by=kops + - group: rbac.authorization.k8s.io + kind: ClusterRole + labelSelector: addon.kops.k8s.io/name=gcp-cloud-controller.addons.k8s.io,app.kubernetes.io/managed-by=kops + - group: rbac.authorization.k8s.io + kind: ClusterRoleBinding + labelSelector: addon.kops.k8s.io/name=gcp-cloud-controller.addons.k8s.io,app.kubernetes.io/managed-by=kops + - group: rbac.authorization.k8s.io + kind: Role + labelSelector: addon.kops.k8s.io/name=gcp-cloud-controller.addons.k8s.io,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system + - group: rbac.authorization.k8s.io + kind: RoleBinding + labelSelector: addon.kops.k8s.io/name=gcp-cloud-controller.addons.k8s.io,app.kubernetes.io/managed-by=kops + namespaces: + - kube-system + selector: + k8s-addon: gcp-cloud-controller.addons.k8s.io + version: 9.99.0 + - id: k8s-1.16 + manifest: networking.cilium.io/k8s-1.16-v1.15.yaml + 
manifestHash: effb9c60ad6e4cb6da8587967778b01d06c29fa59f05ecd801f30cd9c8dcff38 + name: networking.cilium.io + needsPKI: true + needsRollingUpdate: all + selector: + role.kubernetes.io/networking: "1" + version: 9.99.0