diff --git a/bootstrap/controllers/ck8sconfig_controller.go b/bootstrap/controllers/ck8sconfig_controller.go index bdd70113..a1884e7a 100644 --- a/bootstrap/controllers/ck8sconfig_controller.go +++ b/bootstrap/controllers/ck8sconfig_controller.go @@ -213,6 +213,9 @@ func (r *CK8sConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) } func (r *CK8sConfigReconciler) joinControlplane(ctx context.Context, scope *Scope) error { + + log := r.Log.WithValues("scope.Config", scope.Config) + machine := &clusterv1.Machine{} if err := runtime.DefaultUnstructuredConverter.FromUnstructured(scope.ConfigOwner.Object, machine); err != nil { return fmt.Errorf("cannot convert %s to Machine: %w", scope.ConfigOwner.GetKind(), err) @@ -242,6 +245,11 @@ func (r *CK8sConfigReconciler) joinControlplane(ctx context.Context, scope *Scop ControlPlaneEndpoint: scope.Cluster.Spec.ControlPlaneEndpoint.Host, ControlPlaneConfig: controlPlaneConfig, }) + log.Info("-----------------------------------------") + log.Info("Config.Name: %v\n", scope.Config.Name) + log.Info("extraSANs: %v\n", controlPlaneConfig.ExtraSANs) + log.Info("-----------------------------------------") + joinConfig, err := kubeyaml.Marshal(configStruct) if err != nil { return err diff --git a/c1.yaml b/c1.yaml new file mode 100644 index 00000000..6d3863e9 --- /dev/null +++ b/c1.yaml @@ -0,0 +1,103 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: c1 + namespace: default +spec: + clusterNetwork: + pods: + cidrBlocks: + - 10.1.0.0/16 + serviceDomain: cluster.local + services: + cidrBlocks: + - 10.152.0.0/16 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 + kind: CK8sControlPlane + name: c1-control-plane + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerCluster + name: c1 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerCluster +metadata: + name: c1 + namespace: default +spec: {} +--- +apiVersion: 
controlplane.cluster.x-k8s.io/v1beta2 +kind: CK8sControlPlane +metadata: + name: c1-control-plane + namespace: default +spec: + machineTemplate: + infrastructureTemplate: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachineTemplate + name: c1-control-plane + replicas: 3 + spec: + airGapped: true + controlPlane: + extraKubeAPIServerArgs: + --anonymous-auth: "true" + version: v1.29.6 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerMachineTemplate +metadata: + name: c1-control-plane + namespace: default +spec: + template: + spec: + customImage: k8s-snap:dev-old +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + name: c1-worker-md-0 + namespace: default +spec: + clusterName: c1 + replicas: 3 + selector: + matchLabels: + cluster.x-k8s.io/cluster-name: c1 + template: + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 + kind: CK8sConfigTemplate + name: c1-md-0 + clusterName: c1 + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachineTemplate + name: c1-md-0 + version: v1.29.6 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerMachineTemplate +metadata: + name: c1-md-0 + namespace: default +spec: + template: + spec: + customImage: k8s-snap:dev-old +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 +kind: CK8sConfigTemplate +metadata: + name: c1-md-0 + namespace: default +spec: + template: + spec: + airGapped: true diff --git a/templates/docker/Dockerfile b/templates/docker/Dockerfile index 0aa0ef84..e13b60d9 100644 --- a/templates/docker/Dockerfile +++ b/templates/docker/Dockerfile @@ -87,12 +87,21 @@ FROM builder AS build-helm RUN /src/k8s-snap/build-scripts/build-component.sh helm ## kubernetes build +RUN cat /src/k8s-snap/build-scripts/components/kubernetes/version FROM builder AS build-kubernetes ENV KUBERNETES_VERSION=${KUBERNETES_VERSION} RUN if [ -n "$KUBERNETES_VERSION" ]; then \ + echo 
"Overwriting Kubernetes version with $KUBERNETES_VERSION"; \ echo "$KUBERNETES_VERSION" > /src/k8s-snap/build-scripts/components/kubernetes/version; \ + cat /src/k8s-snap/build-scripts/components/kubernetes/version; \ fi -RUN /src/k8s-snap/build-scripts/build-component.sh kubernetes +RUN cat /src/k8s-snap/build-scripts/components/kubernetes/version +RUN echo "Kubernetes version: $KUBERNETES_VERSION" \ +&& cat /src/k8s-snap/build-scripts/components/kubernetes/version \ +&& /src/k8s-snap/build-scripts/build-component.sh kubernetes +RUN ls /out +RUN /out/bin/kubectl version --client +RUN /out/bin/kubelet --version ## runc build FROM builder AS build-runc @@ -162,3 +171,7 @@ ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/k8s/ ## NOTE(neoaggelos): Required for containerd to properly set up overlayfs for pods VOLUME ["/var/snap/k8s/common/var/lib/containerd"] + +## NOTE(ben): Remove existing kind image kubectl and kubelet binaries +# to avoid version confusion. +RUN rm -f /usr/bin/kubectl /usr/bin/kubelet diff --git a/test/e2e/cluster_upgrade.go b/test/e2e/cluster_upgrade.go index dd54f95a..cc0593cb 100644 --- a/test/e2e/cluster_upgrade.go +++ b/test/e2e/cluster_upgrade.go @@ -152,34 +152,34 @@ func ClusterUpgradeSpec(ctx context.Context, inputGetter func() ClusterUpgradeSp WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"), }, result) - By("Upgrading the Kubernetes control-plane") - UpgradeControlPlaneAndWaitForUpgrade(ctx, UpgradeControlPlaneAndWaitForUpgradeInput{ - ClusterProxy: input.BootstrapClusterProxy, - Cluster: result.Cluster, - ControlPlane: result.ControlPlane, - KubernetesUpgradeVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo), - UpgradeMachineTemplate: ptr.To(fmt.Sprintf("%s-control-plane-old", clusterName)), - WaitForMachinesToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"), - }) - - By("Upgrading the machine deployment") - 
framework.UpgradeMachineDeploymentsAndWait(ctx, framework.UpgradeMachineDeploymentsAndWaitInput{ - ClusterProxy: input.BootstrapClusterProxy, - Cluster: result.Cluster, - UpgradeVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo), - MachineDeployments: result.MachineDeployments, - UpgradeMachineTemplate: ptr.To(fmt.Sprintf("%s-md-1.30-0", clusterName)), - WaitForMachinesToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"), - }) - - By("Waiting until nodes are ready") - workloadProxy := input.BootstrapClusterProxy.GetWorkloadCluster(ctx, namespace.Name, result.Cluster.Name) - workloadClient := workloadProxy.GetClient() - framework.WaitForNodesReady(ctx, framework.WaitForNodesReadyInput{ - Lister: workloadClient, - KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo), - Count: int(result.ExpectedTotalNodes()), - WaitForNodesReady: input.E2EConfig.GetIntervals(specName, "wait-nodes-ready"), - }) + /* By("Upgrading the Kubernetes control-plane") + UpgradeControlPlaneAndWaitForUpgrade(ctx, UpgradeControlPlaneAndWaitForUpgradeInput{ + ClusterProxy: input.BootstrapClusterProxy, + Cluster: result.Cluster, + ControlPlane: result.ControlPlane, + KubernetesUpgradeVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo), + UpgradeMachineTemplate: ptr.To(fmt.Sprintf("%s-control-plane-old", clusterName)), + WaitForMachinesToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"), + }) + + By("Upgrading the machine deployment") + framework.UpgradeMachineDeploymentsAndWait(ctx, framework.UpgradeMachineDeploymentsAndWaitInput{ + ClusterProxy: input.BootstrapClusterProxy, + Cluster: result.Cluster, + UpgradeVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo), + MachineDeployments: result.MachineDeployments, + UpgradeMachineTemplate: ptr.To(fmt.Sprintf("%s-md-1.30-0", clusterName)), + WaitForMachinesToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"), + }) */ + + /* 
By("Waiting until nodes are ready") + workloadProxy := input.BootstrapClusterProxy.GetWorkloadCluster(ctx, namespace.Name, result.Cluster.Name) + workloadClient := workloadProxy.GetClient() + framework.WaitForNodesReady(ctx, framework.WaitForNodesReadyInput{ + Lister: workloadClient, + KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo), + Count: int(result.ExpectedTotalNodes()), + WaitForNodesReady: input.E2EConfig.GetIntervals(specName, "wait-nodes-ready"), + }) */ }) } diff --git a/test/e2e/cluster_upgrade_test.go b/test/e2e/cluster_upgrade_test.go index 5829f75d..4de856e6 100644 --- a/test/e2e/cluster_upgrade_test.go +++ b/test/e2e/cluster_upgrade_test.go @@ -25,7 +25,7 @@ import ( ) var _ = Describe("Workload cluster upgrade [CK8s-Upgrade]", func() { - Context("Upgrading a cluster with 1 control plane", func() { + /* Context("Upgrading a cluster with 1 control plane", func() { It("Non-HA upgrades require in-place upgrades which are not supported yet.", // TODO(ben): Enable this test once we have support for in-place upgrades. func() { Skip("") }, @@ -42,7 +42,7 @@ var _ = Describe("Workload cluster upgrade [CK8s-Upgrade]", func() { WorkerMachineCount: ptr.To[int64](2), } }) - }) + }) */ Context("Upgrading a cluster with HA control plane", func() { ClusterUpgradeSpec(ctx, func() ClusterUpgradeSpecInput { @@ -54,7 +54,7 @@ var _ = Describe("Workload cluster upgrade [CK8s-Upgrade]", func() { SkipCleanup: skipCleanup, InfrastructureProvider: ptr.To("docker"), ControlPlaneMachineCount: ptr.To[int64](3), - WorkerMachineCount: ptr.To[int64](1), + WorkerMachineCount: ptr.To[int64](0), } }) })