Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Use chainguard dev image and set Pod Security Standard to restricted profile for the Kubecost copier #32

Open
wants to merge 2 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion cmd/diskautoscaler/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -119,7 +119,7 @@ func main() {

mux := http.NewServeMux()

err = diskscaler.Setup(mux, k8sRest, baseK8sClient, dynamicK8sClient)
err = diskscaler.Setup(mux, k8sRest, baseK8sClient, dynamicK8sClient, Version)
if err != nil {
log.Error().Err(err).Msgf("Kubescaler setup failed")
}
Expand Down
2 changes: 1 addition & 1 deletion go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@ require (
k8s.io/apimachinery v0.30.1
k8s.io/client-go v0.30.1
k8s.io/kubectl v0.30.1
sigs.k8s.io/yaml v1.3.0
)

require (
Expand Down Expand Up @@ -70,5 +71,4 @@ require (
k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)
44 changes: 44 additions & 0 deletions pkg/diskscaler/copier.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
# Pod template for the transient "copier" pod the disk autoscaler creates to
# move data from the original PVC to the resized PVC. Rendered as a Go
# template in createCopierPodKubernetesObject; {{ ... }} placeholders are
# filled from CopierTemplateData.
apiVersion: v1
kind: Pod
metadata:
  name: {{ .PodName }}
  namespace: {{ .Namespace }}
  labels:
    # Standard app.kubernetes.io recommended labels identifying the pod as a
    # component of the kubecost disk autoscaler.
    app.kubernetes.io/name: {{.Name }}
    app.kubernetes.io/instance: {{ .PodName }}
    app.kubernetes.io/version: {{ .Version }}
    app.kubernetes.io/component: {{ .Component }}
    app.kubernetes.io/part-of: {{ .PartOf }}
    app.kubernetes.io/managed-by: {{ .Service }}
spec:
  # Pod-level security context targeting the "restricted" Pod Security
  # Standard: non-root user with the default seccomp profile.
  securityContext:
    runAsNonRoot: true
    seccompProfile:
      type: RuntimeDefault
    runAsUser: {{ .RunAsUser }}
  containers:
    - name: {{ .ContainerName }}
      image: {{ .Image }}
      # Keep the container alive indefinitely; the copy command itself is
      # executed against the pod separately (see dataMoverTransientPod).
      command: ["/bin/sh", "-c", "--"]
      args: ["sleep infinity"]
      # Container security context: drop all capabilities, forbid privilege
      # escalation, and mount the root filesystem read-only (data is written
      # only to the mounted volumes below).
      securityContext:
        runAsNonRoot: true
        allowPrivilegeEscalation: false
        capabilities:
          drop:
            - ALL
        readOnlyRootFilesystem: true
        seccompProfile:
          type: RuntimeDefault
      volumeMounts:
        - name: orig-vol-mount
          mountPath: /oldData   # copy source: existing PVC
        - name: backup-vol-mount
          mountPath: /newData   # copy destination: resized PVC
  volumes:
    - name: orig-vol-mount
      persistentVolumeClaim:
        claimName: {{.OriginalPVC}}
    - name: backup-vol-mount
      persistentVolumeClaim:
        claimName: {{.NewPVC}}
141 changes: 94 additions & 47 deletions pkg/diskscaler/diskscaler.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,12 @@ package diskscaler
import (
"bytes"
"context"
"embed"
"fmt"
"html/template"
"io"
"math/rand"
"os"
"slices"
"strconv"
"strings"
Expand All @@ -24,6 +28,7 @@ import (
"k8s.io/client-go/tools/remotecommand"
"k8s.io/client-go/util/retry"
"k8s.io/kubectl/pkg/scheme"
"sigs.k8s.io/yaml"
)

var supportedSCProvisioner = []string{"ebs.csi.aws.com"}
Expand All @@ -35,17 +40,41 @@ const (
inactivityDuringDelay = 10 * time.Second
// Setting a timeout of 4 minutes on any creation or delete operation of disk scaler
diskScalingOperationTimeout = 4 * time.Minute
copierContainerName = "temp-container"
copierDefaultImage = "cgr.dev/chainguard/wolfi-base:latest"
ServiceMetaDataLabel = "kubecost-disk-autoscaler"
PartOfMetaDataLabel = "kubecost-disk-autoscaler"
ComponentMetaDataLabel = "copy-pod"
)

var allowedCharactersForPVCName = []rune("abcdefghijklmnopqrstuvwxyz")

// CopierTemplateData carries the values substituted into the embedded
// copier.yaml pod template (see createCopierPodKubernetesObject).
type CopierTemplateData struct {
	Name          string // app.kubernetes.io/name label value
	Version       string // app.kubernetes.io/version label value
	Component     string // app.kubernetes.io/component label value
	PartOf        string // app.kubernetes.io/part-of label value
	Service       string // app.kubernetes.io/managed-by label value
	PodName       string // metadata.name (and instance label) of the copier pod
	Namespace     string // namespace the pod is created in
	ContainerName string // name of the single copier container
	OriginalPVC   string // claim mounted at /oldData (copy source)
	NewPVC        string // claim mounted at /newData (copy destination)
	Image         string // container image the copier runs
	RunAsUser     int64  // UID for the pod-level securityContext.runAsUser
}

//go:embed copier.yaml
var templateFS embed.FS

// DiskScaler drives the PVC right-sizing workflow for a single cluster,
// using the Kubernetes API (typed and dynamic clients) and Kubecost
// sizing recommendations.
type DiskScaler struct {
	clientConfig     *rest.Config                              // REST config; also needed for exec-style requests against pods
	basicK8sClient   kubernetes.Interface                      // typed client for core resources (pods, PVCs)
	dynamicK8sClient *dynamic.DynamicClient                    // dynamic client for unstructured resource access
	clusterID        string                                    // identifier of the cluster being scaled
	kubecostsvc      *pvsizingrecommendation.KubecostService   // source of PV sizing recommendations
	auditMode        bool                                      // when true, scaling actions are skipped (report-only)
	version          string                                    // service version stamped into copier pod labels
}

type pvcDetails struct {
Expand All @@ -66,7 +95,8 @@ func NewDiskScaler(clientConfig *rest.Config,
dynamicK8sClient *dynamic.DynamicClient,
clusterID string,
kubecostsvc *pvsizingrecommendation.KubecostService,
auditMode bool) (*DiskScaler, error) {
auditMode bool,
version string) (*DiskScaler, error) {
if basicK8sClient == nil {
return nil, fmt.Errorf("must have a Kubernetes client")
}
Expand All @@ -82,6 +112,7 @@ func NewDiskScaler(clientConfig *rest.Config,
clusterID: clusterID,
kubecostsvc: kubecostsvc,
auditMode: auditMode,
version: version,
}, nil
}

Expand Down Expand Up @@ -616,54 +647,13 @@ func (ds *DiskScaler) createPVCFromASpec(ctx context.Context, namespace string,
// dataMoverTransientPod create a transient pod to move data between original PV claim volume source to new PV Claim volume source
func (ds *DiskScaler) dataMoverTransientPod(ctx context.Context, namespace string, copierPodName string, originalPVC string, newPVC string) error {
cpCommand := "if [ -z \"$(ls -A /oldData)\" ]; then echo \"directory is empty no need to copy\"; else cp -r /oldData/* /newData/; fi"
req := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: copierPodName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "temp-container",
Image: "ubuntu",
Command: []string{"/bin/bash", "-c", "sleep infinity"},
VolumeMounts: []v1.VolumeMount{
{
Name: "orig-vol-mount",
MountPath: "/oldData",
},
{
Name: "backup-vol-mount",
MountPath: "/newData",
},
},
},
},
Volumes: []v1.Volume{
{
Name: "orig-vol-mount",
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: originalPVC,
},
},
},
{
Name: "backup-vol-mount",
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: newPVC,
},
},
},
},
},

podSpec, err := createCopierPodKubernetesObject(copierPodName, namespace, originalPVC, newPVC, ds.version)
if err != nil {
return fmt.Errorf("createCopierPodKubernetesObject failed: %w", err)
}

resp, err := ds.basicK8sClient.CoreV1().Pods(namespace).Create(ctx, req, metav1.CreateOptions{})
resp, err := ds.basicK8sClient.CoreV1().Pods(namespace).Create(ctx, &podSpec, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("failed to create copier pod %s in namespace %s with err: %w ", copierPodName, namespace, err)
}
Expand Down Expand Up @@ -819,6 +809,63 @@ func (ds *DiskScaler) deleteTransientPod(ctx context.Context, namespace string,
return nil
}

// createCopierPodKubernetesObject renders the embedded copier.yaml template
// with the supplied identifiers and unmarshals the result into a typed
// v1.Pod ready for submission to the API server.
//
// copierPodName and namespace identify the transient pod; originalPVC and
// newPVC are the claim names mounted at /oldData and /newData respectively;
// version is stamped into the app.kubernetes.io/version label.
func createCopierPodKubernetesObject(copierPodName, namespace, originalPVC, newPVC, version string) (v1.Pod, error) {
	// The template is compiled into the binary via go:embed, so a read
	// failure indicates a packaging problem rather than a runtime one.
	podTemplateYAML, err := templateFS.ReadFile("copier.yaml")
	if err != nil {
		return v1.Pod{}, fmt.Errorf("failed to read the copier.yaml with err: %w ", err)
	}

	data := CopierTemplateData{
		Name:          kubecostDataMoverTransientPodName,
		Version:       version,
		Component:     ComponentMetaDataLabel,
		PartOf:        PartOfMetaDataLabel,
		Service:       ServiceMetaDataLabel,
		PodName:       copierPodName,
		Namespace:     namespace,
		ContainerName: copierContainerName,
		Image:         copierDefaultImage,
		OriginalPVC:   originalPVC,
		NewPVC:        newPVC,
		RunAsUser:     1000,
	}

	// Return parse failures to the caller instead of panicking via
	// template.Must: this runs per scaling operation, not at startup.
	// NOTE(review): the file imports html/template; text/template is the
	// usual choice for YAML output since HTML auto-escaping could mangle
	// rendered values — confirm and switch the import if appropriate.
	tmpl, err := template.New("pod").Parse(string(podTemplateYAML))
	if err != nil {
		return v1.Pod{}, fmt.Errorf("failed to parse copier pod template for pod %s in namespace %s: %w", copierPodName, namespace, err)
	}

	// Render through a temp file used as scratch space for the expanded YAML.
	bufFile, err := os.CreateTemp("", fmt.Sprintf("%s.yaml", copierPodName))
	if err != nil {
		return v1.Pod{}, fmt.Errorf("failed to create temp file for pod %s in namespace %s: %w", copierPodName, namespace, err)
	}
	// Defers run LIFO: the handle is closed first, then the file removed.
	// (The previous revision never closed it, leaking a descriptor per call.)
	defer os.Remove(bufFile.Name())
	defer bufFile.Close()

	// Execute the template, writing the rendered YAML to the temp file.
	if err := tmpl.Execute(bufFile, data); err != nil {
		return v1.Pod{}, fmt.Errorf("failed to execute template for pod %s in namespace %s: %w", copierPodName, namespace, err)
	}

	// Rewind and read the rendered output back into memory.
	if _, err := bufFile.Seek(0, 0); err != nil {
		return v1.Pod{}, fmt.Errorf("unable to rewind rendered template for pod %s in namespace %s: %w", copierPodName, namespace, err)
	}
	buf, err := io.ReadAll(bufFile)
	if err != nil {
		return v1.Pod{}, fmt.Errorf("failed to read temp file for pod %s in namespace %s: %w", copierPodName, namespace, err)
	}

	// sigs.k8s.io/yaml converts the YAML to JSON and decodes into the typed
	// Pod struct, honoring the API machinery's json tags.
	var pod v1.Pod
	if err := yaml.Unmarshal(buf, &pod); err != nil {
		return v1.Pod{}, fmt.Errorf("failed to unmarshal YAML for pod %s in namespace %s: %w", copierPodName, namespace, err)
	}

	return pod, nil
}

// isGreaterQuantity returns true if resizeTo is greater than original size
func isGreaterQuantity(originalSize resource.Quantity, resizeTo resource.Quantity) bool {
return resizeTo.Cmp(originalSize) == 1
Expand Down
11 changes: 6 additions & 5 deletions pkg/diskscaler/service.go
Original file line number Diff line number Diff line change
Expand Up @@ -67,10 +67,11 @@ func NewDiskScalerService(clientConfig *rest.Config,
resizeAll bool,
auditMode bool,
kubecostSvc *pvsizingrecommendation.KubecostService,
excludedNamespaces []string) (*DiskScalerService, error) {
excludedNamespaces []string,
version string) (*DiskScalerService, error) {
// To-DO :fill it via kubecost API
clusterID := "localCluster"
ds, err := NewDiskScaler(clientConfig, k8sClient, dynamicK8sClient, clusterID, kubecostSvc, auditMode)
ds, err := NewDiskScaler(clientConfig, k8sClient, dynamicK8sClient, clusterID, kubecostSvc, auditMode, version)
if err != nil {
return nil, fmt.Errorf("unable to create NewDiskScaler: %w", err)
}
Expand Down Expand Up @@ -204,12 +205,12 @@ func (dss *DiskScalerService) startAutomatedScaling() error {
if dss.auditMode {
return
}
log.Debug().Msgf("status at %s :%+v, triggered the disk scaling", diskAutoScalerRun, status)
log.Info().Msgf("status at %s :%+v, triggered the disk scaling", diskAutoScalerRun, status)
if status.NumEnabled == 0 {
log.Debug().Msgf("No workloads have autoscaling enabled at %s", diskAutoScalerRun)
log.Info().Msgf("No workloads have autoscaling enabled at %s", diskAutoScalerRun)
}
if status.NumEligible == 0 {
log.Debug().Msgf("No workload with autoscaling eligible at %s", diskAutoScalerRun)
log.Info().Msgf("No workload with autoscaling eligible at %s", diskAutoScalerRun)
}
}
}()
Expand Down
4 changes: 2 additions & 2 deletions pkg/diskscaler/setup.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ const (
KubecostNamespace = "kubecost"
)

func Setup(mux *http.ServeMux, clientConfig *rest.Config, k8sClient kubernetes.Interface, dynamicK8sClient *dynamic.DynamicClient) error {
func Setup(mux *http.ServeMux, clientConfig *rest.Config, k8sClient kubernetes.Interface, dynamicK8sClient *dynamic.DynamicClient, version string) error {
costModelPath, err := getDiskScalerCostModelPath()
if len(costModelPath) == 0 {
return fmt.Errorf("setup of Disk Auto Scaler failed: %w", err)
Expand Down Expand Up @@ -47,7 +47,7 @@ func Setup(mux *http.ServeMux, clientConfig *rest.Config, k8sClient kubernetes.I
}

recommendationSvc := pvsizingrecommendation.NewKubecostService(costModelPath)
dss, err := NewDiskScalerService(clientConfig, k8sClient, dynamicK8sClient, resizeAll, auditMode, recommendationSvc, excludedNamespaces)
dss, err := NewDiskScalerService(clientConfig, k8sClient, dynamicK8sClient, resizeAll, auditMode, recommendationSvc, excludedNamespaces, version)
if err != nil {
return fmt.Errorf("failed to create disk scaler service: %w", err)
}
Expand Down