136 changes: 83 additions & 53 deletions controllers/handlers_helm.go
@@ -366,7 +366,7 @@ func undeployHelmChartsInPullMode(ctx context.Context, c client.Client, clusterS
if err != nil {
return err
}
setters := prepareSetters(clusterSummary, libsveltosv1beta1.FeatureHelm, profileRef, nil)
setters := prepareSetters(clusterSummary, libsveltosv1beta1.FeatureHelm, profileRef, nil, true)

// If charts have pre/post delete hooks, those need to be deployed. A ConfigurationGroup to deploy those
// is created. If this does not exist yet assume we still have to deploy those.
@@ -2207,73 +2207,64 @@ func updateChartsInClusterConfiguration(ctx context.Context, c client.Client, cl
func undeployStaleReleases(ctx context.Context, c client.Client, clusterSummary *configv1beta1.ClusterSummary,
kubeconfig string, logger logr.Logger) ([]configv1beta1.ReleaseReport, error) {

chartManager, err := chartmanager.GetChartManagerInstance(ctx, c)
staleReleases, err := getStaleReleases(ctx, c, clusterSummary, logger)
if err != nil {
logger.V(logs.LogInfo).Error(err, "failed to get list of stale helm releases")
return nil, err
}

managedHelmReleases := chartManager.GetManagedHelmReleases(clusterSummary)

// Build map of current referenced helm charts
currentlyReferencedReleases := make(map[string]bool)
for i := range clusterSummary.Spec.ClusterProfileSpec.HelmCharts {
currentChart := &clusterSummary.Spec.ClusterProfileSpec.HelmCharts[i]
currentlyReferencedReleases[chartManager.GetReleaseKey(currentChart.ReleaseNamespace, currentChart.ReleaseName)] = true
chartManager, err := chartmanager.GetChartManagerInstance(ctx, c)
if err != nil {
return nil, err
}

reports := make([]configv1beta1.ReleaseReport, 0)

for i := range managedHelmReleases {
releaseKey := chartManager.GetReleaseKey(managedHelmReleases[i].Namespace, managedHelmReleases[i].Name)
if _, ok := currentlyReferencedReleases[releaseKey]; !ok {
logger.V(logs.LogInfo).Info(fmt.Sprintf("helm release %s (namespace %s) used to be managed but not referenced anymore",
managedHelmReleases[i].Name, managedHelmReleases[i].Namespace))

_, err := getReleaseInfo(managedHelmReleases[i].Name,
managedHelmReleases[i].Namespace, kubeconfig, &registryClientOptions{}, false)
if err != nil {
if errors.Is(err, driver.ErrReleaseNotFound) {
continue
}
return nil, err
for i := range staleReleases {
_, err := getReleaseInfo(staleReleases[i].Name,
staleReleases[i].Namespace, kubeconfig, &registryClientOptions{}, false)
if err != nil {
if errors.Is(err, driver.ErrReleaseNotFound) {
continue
}
return nil, err
}

if clusterSummary.Spec.ClusterProfileSpec.SyncMode != configv1beta1.SyncModeDryRun {
// If another ClusterSummary is queued to manage this chart in this cluster, do not uninstall.
// Let the other ClusterSummary take it over.

currentChart := &configv1beta1.HelmChart{
ReleaseNamespace: managedHelmReleases[i].Namespace,
ReleaseName: managedHelmReleases[i].Name,
}
otherRegisteredClusterSummaries := chartManager.GetRegisteredClusterSummariesForChart(
clusterSummary.Spec.ClusterNamespace, clusterSummary.Spec.ClusterName,
clusterSummary.Spec.ClusterType, currentChart)
if len(otherRegisteredClusterSummaries) > 1 {
// Immediately unregister so next inline ClusterSummary can take this over
chartManager.UnregisterClusterSummaryForChart(clusterSummary, currentChart)
err = requeueAllOtherClusterSummaries(ctx, c, clusterSummary.Spec.ClusterNamespace,
otherRegisteredClusterSummaries, logger)
if err != nil {
// TODO: Handle errors to prevent bad state. ClusterSummary no longer manage the chart,
// but no other ClusterSummary instance has been requeued.
return nil, err
}
if clusterSummary.Spec.ClusterProfileSpec.SyncMode != configv1beta1.SyncModeDryRun {
// If another ClusterSummary is queued to manage this chart in this cluster, do not uninstall.
// Let the other ClusterSummary take it over.

continue
}
currentChart := &configv1beta1.HelmChart{
ReleaseNamespace: staleReleases[i].Namespace,
ReleaseName: staleReleases[i].Name,
}
otherRegisteredClusterSummaries := chartManager.GetRegisteredClusterSummariesForChart(
clusterSummary.Spec.ClusterNamespace, clusterSummary.Spec.ClusterName,
clusterSummary.Spec.ClusterType, currentChart)
if len(otherRegisteredClusterSummaries) > 1 {
// Immediately unregister so next inline ClusterSummary can take this over
chartManager.UnregisterClusterSummaryForChart(clusterSummary, currentChart)
err = requeueAllOtherClusterSummaries(ctx, c, clusterSummary.Spec.ClusterNamespace,
otherRegisteredClusterSummaries, logger)
if err != nil {
// TODO: Handle errors to prevent bad state. ClusterSummary no longer manage the chart,
// but no other ClusterSummary instance has been requeued.
return nil, err
}

if err := uninstallRelease(ctx, clusterSummary, managedHelmReleases[i].Name,
managedHelmReleases[i].Namespace, kubeconfig, &registryClientOptions{}, nil, logger); err != nil {
return nil, err
continue
}
}

reports = append(reports, configv1beta1.ReleaseReport{
ReleaseNamespace: managedHelmReleases[i].Namespace, ReleaseName: managedHelmReleases[i].Name,
Action: string(configv1beta1.UninstallHelmAction),
})
if err := uninstallRelease(ctx, clusterSummary, staleReleases[i].Name,
staleReleases[i].Namespace, kubeconfig, &registryClientOptions{}, nil, logger); err != nil {
return nil, err
}

reports = append(reports, configv1beta1.ReleaseReport{
ReleaseNamespace: staleReleases[i].Namespace, ReleaseName: staleReleases[i].Name,
Action: string(configv1beta1.UninstallHelmAction),
})
}

return reports, nil
@@ -4445,7 +4436,14 @@ func commitStagedResourcesForDeployment(ctx context.Context, clusterSummary *con
return err
}

setters := prepareSetters(clusterSummary, libsveltosv1beta1.FeatureHelm, profileRef, configurationHash)
staleReleases, err := getStaleReleases(ctx, getManagementClusterClient(), clusterSummary, logger)
if err != nil {
logger.V(logs.LogInfo).Error(err, "failed to get list of stale helm releases")
return err
}

// if a stale helm release is being deleted, run the pre/post delete checks
setters := prepareSetters(clusterSummary, libsveltosv1beta1.FeatureHelm, profileRef, configurationHash, len(staleReleases) != 0)
// Commit deployment
return pullmode.CommitStagedResourcesForDeployment(ctx, getManagementClusterClient(),
clusterSummary.Spec.ClusterNamespace, clusterSummary.Spec.ClusterName, configv1beta1.ClusterSummaryKind,
@@ -4569,3 +4567,35 @@ func removeCachedData(settings *cli.EnvSettings, name, repoURL string, registryO

_ = repoAddOrUpdate(settings, name, repoURL, registryOptions, logger)
}

// getStaleReleases returns releases which used to be managed by the ClusterSummary but are not referenced anymore
func getStaleReleases(ctx context.Context, c client.Client, clusterSummary *configv1beta1.ClusterSummary,
logger logr.Logger) ([]chartmanager.HelmReleaseInfo, error) {

chartManager, err := chartmanager.GetChartManagerInstance(ctx, c)
if err != nil {
return nil, err
}

managedHelmReleases := chartManager.GetManagedHelmReleases(clusterSummary)

// Build map of current referenced helm charts
currentlyReferencedReleases := make(map[string]bool)
for i := range clusterSummary.Spec.ClusterProfileSpec.HelmCharts {
currentChart := &clusterSummary.Spec.ClusterProfileSpec.HelmCharts[i]
currentlyReferencedReleases[chartManager.GetReleaseKey(currentChart.ReleaseNamespace, currentChart.ReleaseName)] = true
}

staleReleases := make([]chartmanager.HelmReleaseInfo, 0)

for i := range managedHelmReleases {
releaseKey := chartManager.GetReleaseKey(managedHelmReleases[i].Namespace, managedHelmReleases[i].Name)
if _, ok := currentlyReferencedReleases[releaseKey]; !ok {
logger.V(logs.LogInfo).Info(fmt.Sprintf("helm release %s (namespace %s) used to be managed but not referenced anymore",
managedHelmReleases[i].Name, managedHelmReleases[i].Namespace))
staleReleases = append(staleReleases, managedHelmReleases[i])
}
}

return staleReleases, nil
}
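
Not part of the diff itself — a minimal sketch (using only the functions shown above) of how the pull-mode Helm path now decides whether the staged ConfigurationGroup needs the profile's pre/post delete checks: getStaleReleases reports which previously managed releases are no longer referenced, and the checks are attached only when that list is non-empty.

```go
// Sketch only: mirrors commitStagedResourcesForDeployment in this diff.
staleReleases, err := getStaleReleases(ctx, getManagementClusterClient(), clusterSummary, logger)
if err != nil {
	logger.V(logs.LogInfo).Error(err, "failed to get list of stale helm releases")
	return err
}

// A non-empty list means at least one release is about to be uninstalled,
// so the agent must run the configured PreDeleteChecks/PostDeleteChecks.
includeDeleteChecks := len(staleReleases) != 0

setters := prepareSetters(clusterSummary, libsveltosv1beta1.FeatureHelm, profileRef,
	configurationHash, includeDeleteChecks)

// The setters are then passed to pullmode.CommitStagedResourcesForDeployment,
// exactly as in the hunk above.
```

The undeploy path (undeployHelmChartsInPullMode) always passes true for this flag, since removal is the whole point there.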
4 changes: 2 additions & 2 deletions controllers/handlers_kustomize.go
@@ -219,8 +219,8 @@ func processKustomizeDeployment(ctx context.Context, remoteRestConfig *rest.Conf
}

if isPullMode {
setters := prepareSetters(clusterSummary, libsveltosv1beta1.FeatureKustomize, profileRef, configurationHash)

setters := prepareSetters(clusterSummary, libsveltosv1beta1.FeatureKustomize, profileRef,
configurationHash, false)
err = pullmode.CommitStagedResourcesForDeployment(ctx, c,
clusterSummary.Spec.ClusterNamespace, clusterSummary.Spec.ClusterName, configv1beta1.ClusterSummaryKind,
clusterSummary.Name, string(libsveltosv1beta1.FeatureKustomize),
6 changes: 3 additions & 3 deletions controllers/handlers_resources.go
@@ -174,8 +174,8 @@ func postProcessDeployedResources(ctx context.Context, remoteRestConfig *rest.Co
}

if isPullMode {
setters := prepareSetters(clusterSummary, libsveltosv1beta1.FeatureResources, profileRef, configurationHash)

setters := prepareSetters(clusterSummary, libsveltosv1beta1.FeatureResources, profileRef,
configurationHash, false)
err = pullmode.CommitStagedResourcesForDeployment(ctx, c,
clusterSummary.Spec.ClusterNamespace, clusterSummary.Spec.ClusterName, configv1beta1.ClusterSummaryKind,
clusterSummary.Name, string(libsveltosv1beta1.FeatureResources),
@@ -486,7 +486,7 @@ func pullModeUndeployResources(ctx context.Context, c client.Client, clusterSumm
return err
}

setters := prepareSetters(clusterSummary, fID, profileRef, nil)
setters := prepareSetters(clusterSummary, fID, profileRef, nil, true)

// discard all previous staged resources. This will instruct agent to undeploy
err = pullmode.RemoveDeployedResources(ctx, c, clusterSummary.Spec.ClusterNamespace,
11 changes: 8 additions & 3 deletions controllers/handlers_utils.go
@@ -1456,7 +1456,7 @@ func getPatchesHash(ctx context.Context, clusterSummary *configv1beta1.ClusterSu
}

func prepareSetters(clusterSummary *configv1beta1.ClusterSummary, featureID libsveltosv1beta1.FeatureID,
profileRef *corev1.ObjectReference, configurationHash []byte) []pullmode.Option {
profileRef *corev1.ObjectReference, configurationHash []byte, includeDeleteChecks bool) []pullmode.Option {

setters := make([]pullmode.Option, 0)
if clusterSummary.Spec.ClusterProfileSpec.SyncMode == configv1beta1.SyncModeContinuousWithDriftDetection {
@@ -1495,10 +1495,15 @@ func prepareSetters(clusterSummary *configv1beta1.ClusterSummary, featureID libs
pullmode.WithContinueOnConflict(clusterSummary.Spec.ClusterProfileSpec.ContinueOnConflict),
pullmode.WithContinueOnError(clusterSummary.Spec.ClusterProfileSpec.ContinueOnError),
pullmode.WithValidateHealths(clusterSummary.Spec.ClusterProfileSpec.ValidateHealths),
pullmode.WithPreDeleteChecks(clusterSummary.Spec.ClusterProfileSpec.PreDeleteChecks),
pullmode.WithPostDeleteChecks(clusterSummary.Spec.ClusterProfileSpec.PostDeleteChecks),
pullmode.WithDeployedGVKs(gvks))

if includeDeleteChecks {
setters = append(setters,
pullmode.WithPreDeleteChecks(clusterSummary.Spec.ClusterProfileSpec.PreDeleteChecks),
pullmode.WithPostDeleteChecks(clusterSummary.Spec.ClusterProfileSpec.PostDeleteChecks),
)
}

// Do not check on profileOwnerRef being not nil. It must always be passed
sourceRef := corev1.ObjectReference{
APIVersion: profileRef.APIVersion,
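
Also not in the change set — a condensed view of the new prepareSetters calling convention across the call sites touched in this PR; includeDeleteChecks is the parameter name from the signature above, and the identifiers are copied from the hunks shown here.

```go
// Deploy paths for Kustomize and raw resources: nothing is being removed,
// so the delete checks are left out of the staged ConfigurationGroup.
setters := prepareSetters(clusterSummary, libsveltosv1beta1.FeatureKustomize, profileRef,
	configurationHash, false)

// Undeploy paths (and the Helm path when stale releases exist): the agent
// will delete resources, so PreDeleteChecks/PostDeleteChecks are included.
setters = prepareSetters(clusterSummary, fID, profileRef, nil, true)
```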
4 changes: 2 additions & 2 deletions go.mod
@@ -7,7 +7,7 @@ require (
github.com/Masterminds/semver/v3 v3.4.0
github.com/TwiN/go-color v1.4.1
github.com/dariubs/percent v1.0.0
github.com/docker/cli v29.1.5+incompatible
github.com/docker/cli v29.2.1+incompatible
github.com/fluxcd/pkg/apis/meta v1.25.0
github.com/fluxcd/pkg/http/fetch v0.22.0
github.com/fluxcd/pkg/tar v0.17.0
@@ -183,4 +183,4 @@ require (

// Replace digest lib to master to gather access to BLAKE3.
// xref: https://github.com/opencontainers/go-digest/pull/66
replace github.com/opencontainers/go-digest => github.com/opencontainers/go-digest v1.0.1-0.20250813155314-89707e38ad1a
replace github.com/opencontainers/go-digest => github.com/opencontainers/go-digest v1.0.1-0.20250813155314-89707e38ad1a
4 changes: 2 additions & 2 deletions go.sum
@@ -74,8 +74,8 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI=
github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
github.com/docker/cli v29.1.5+incompatible h1:GckbANUt3j+lsnQ6eCcQd70mNSOismSHWt8vk2AX8ao=
github.com/docker/cli v29.1.5+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v29.2.1+incompatible h1:n3Jt0QVCN65eiVBoUTZQM9mcQICCJt3akW4pKAbKdJg=
github.com/docker/cli v29.2.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo=
github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8=
4 changes: 2 additions & 2 deletions hack/tools/go.mod
@@ -4,7 +4,7 @@ go 1.25.6

require (
github.com/a8m/envsubst v1.4.3
github.com/onsi/ginkgo/v2 v2.27.5
github.com/onsi/ginkgo/v2 v2.28.1
golang.org/x/oauth2 v0.34.0
golang.org/x/tools v0.41.0
k8s.io/client-go v0.35.0
@@ -28,7 +28,7 @@ require (
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/gobuffalo/flect v1.0.3 // indirect
github.com/google/gnostic-models v0.7.0 // indirect
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect
github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
12 changes: 6 additions & 6 deletions hack/tools/go.sum
@@ -69,8 +69,8 @@ github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7O
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8=
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83 h1:z2ogiKUYzX5Is6zr/vP9vJGqPwcdqsWjOt+V8J7+bTc=
github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83/go.mod h1:MxpfABSjhmINe3F1It9d+8exIHFvUqtLIRCdOGNXqiI=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
@@ -115,10 +115,10 @@ github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/ginkgo/v2 v2.27.5 h1:ZeVgZMx2PDMdJm/+w5fE/OyG6ILo1Y3e+QX4zSR0zTE=
github.com/onsi/ginkgo/v2 v2.27.5/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
github.com/onsi/gomega v1.38.3 h1:eTX+W6dobAYfFeGC2PV6RwXRu/MyT+cQguijutvkpSM=
github.com/onsi/gomega v1.38.3/go.mod h1:ZCU1pkQcXDO5Sl9/VVEGlDyp+zm0m1cmeG5TOzLgdh4=
github.com/onsi/ginkgo/v2 v2.28.1 h1:S4hj+HbZp40fNKuLUQOYLDgZLwNUVn19N3Atb98NCyI=
github.com/onsi/ginkgo/v2 v2.28.1/go.mod h1:CLtbVInNckU3/+gC8LzkGUb9oF+e8W8TdUsxPwvdOgE=
github.com/onsi/gomega v1.39.0 h1:y2ROC3hKFmQZJNFeGAMeHZKkjBL65mIZcvrLQBF9k6Q=
github.com/onsi/gomega v1.39.0/go.mod h1:ZCU1pkQcXDO5Sl9/VVEGlDyp+zm0m1cmeG5TOzLgdh4=
github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
3 changes: 1 addition & 2 deletions test/fv/delete_check_test.go
@@ -79,8 +79,8 @@ spec:
- "gh-pages.github.com"`
)

// TODO: Add to NEW-PULLMODE as well once sveltos-applier is taken care of
It("Pre Delete checks blocks an uninstall", Label("NEW-FV", "EXTENDED"), func() {
It("Pre Delete checks blocks an uninstall", Label("NEW-FV", "NEW-FV-PULLMODE", "EXTENDED"), func() {
Byf("Create a ClusterProfile matching Cluster %s/%s",
kindWorkloadCluster.GetNamespace(), kindWorkloadCluster.GetName())
clusterProfile := getClusterProfile(namePrefix, map[string]string{key: value})
2 changes: 1 addition & 1 deletion test/pullmode-sveltosapplier.yaml
@@ -99,7 +99,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: docker.io/projectsveltos/sveltos-applier@sha256:63f1b91a85a285d07e220fd083d10eb38aff976516a486becd9842a34fbef50a
image: docker.io/projectsveltos/sveltos-applier@sha256:767293046d16faaa307598ad2013543d2f8cf533cf5f5399e74298b258580495
livenessProbe:
failureThreshold: 3
httpGet: