@@ -87,6 +87,8 @@ func RegisterSteps(sc *godog.ScenarioContext) {
 	sc.Step(`^(?i)resource apply fails with error msg containing "([^"]+)"$`, ResourceApplyFails)
 	sc.Step(`^(?i)resource "([^"]+)" is eventually restored$`, ResourceRestored)
 	sc.Step(`^(?i)resource "([^"]+)" matches$`, ResourceMatches)
+	sc.Step(`^(?i)user adds restart annotation to "([^"]+)"$`, UserAddsRestartAnnotation)
+	sc.Step(`^(?i)resource "([^"]+)" has restart annotation$`, ResourceHasRestartAnnotation)

 	sc.Step(`^(?i)ServiceAccount "([^"]*)" with needed permissions is available in test namespace$`, ServiceAccountWithNeededPermissionsIsAvailableInNamespace)
 	sc.Step(`^(?i)ServiceAccount "([^"]*)" with needed permissions is available in \${TEST_NAMESPACE}$`, ServiceAccountWithNeededPermissionsIsAvailableInNamespace)
@@ -1168,3 +1170,64 @@ func latestActiveRevisionForExtension(extName string) (*ocv1.ClusterExtensionRev

 	return latest, nil
 }
+
+// UserAddsRestartAnnotation simulates a user running `kubectl rollout restart deployment/<name>`.
+// This adds a restart annotation to the deployment's pod template to trigger a rolling restart.
+// In OLMv0, this annotation would be reverted by the controller. In OLMv1 with Server-Side Apply,
+// it should persist because the user (kubectl) manages this field, not the controller.
+// See: https://github.com/operator-framework/operator-lifecycle-manager/issues/3392
+func UserAddsRestartAnnotation(ctx context.Context, resourceName string) error {
+	sc := scenarioCtx(ctx)
+	resourceName = substituteScenarioVars(resourceName, sc)
+
+	kind, _, ok := strings.Cut(resourceName, "/")
+	if !ok {
+		return fmt.Errorf("invalid resource name format: %s (expected kind/name)", resourceName)
+	}
+
+	if kind != "deployment" {
+		return fmt.Errorf("only deployment resources are supported for restart annotation, got: %s", kind)
+	}
+
+	// Use kubectl rollout restart to add the restart annotation.
+	// This is the actual command users would run, ensuring we test real-world behavior.
+	_, err := k8sClient("rollout", "restart", resourceName, "-n", sc.namespace)
+	if err != nil {
+		return fmt.Errorf("failed to rollout restart %s: %w", resourceName, err)
+	}
+
+	return nil
+}
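For context, here is a minimal client-go sketch of roughly what `kubectl rollout restart` does under the hood, assuming a `kubernetes.Interface` client is available; the function name and the `"kubectl-rollout"` field manager string are illustrative assumptions, not part of this change. It patches the pod template with a timestamped `kubectl.kubernetes.io/restartedAt` annotation under kubectl's own field manager, which is why a Server-Side Apply from the OLM controller (whose applied configuration does not include the annotation) should leave the field in place.

```go
// Illustrative sketch only: roughly what `kubectl rollout restart deployment/<name>` does,
// expressed with client-go. The function name and the "kubectl-rollout" field manager
// are assumptions for illustration.
package sketch

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

func restartDeployment(ctx context.Context, cs kubernetes.Interface, namespace, name string) error {
	// Patch the pod template with the restart annotation set to the current timestamp.
	// Changing the template triggers a rolling restart of the Deployment's pods.
	patch := fmt.Sprintf(
		`{"spec":{"template":{"metadata":{"annotations":{"kubectl.kubernetes.io/restartedAt":%q}}}}}`,
		time.Now().Format(time.RFC3339),
	)
	_, err := cs.AppsV1().Deployments(namespace).Patch(
		ctx, name, types.StrategicMergePatchType, []byte(patch),
		metav1.PatchOptions{
			// A dedicated field manager records this field as user-owned in managedFields,
			// so a controller's Server-Side Apply that omits the annotation will not remove it.
			FieldManager: "kubectl-rollout",
		},
	)
	return err
}
```

The step above shells out to kubectl instead, which keeps the test closer to what a user actually runs.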
+
+// ResourceHasRestartAnnotation verifies that a deployment has a restart annotation.
+// This confirms that user-initiated changes persist after OLM reconciliation.
+func ResourceHasRestartAnnotation(ctx context.Context, resourceName string) error {
+	sc := scenarioCtx(ctx)
+	resourceName = substituteScenarioVars(resourceName, sc)
+
+	kind, deploymentName, ok := strings.Cut(resourceName, "/")
+	if !ok {
+		return fmt.Errorf("invalid resource name format: %s (expected kind/name)", resourceName)
+	}
+
+	if kind != "deployment" {
+		return fmt.Errorf("only deployment resources are supported for restart annotation check, got: %s", kind)
+	}
+
+	// Check for the restart annotation added by kubectl rollout restart.
+	restartAnnotationKey := "kubectl.kubernetes.io/restartedAt"
+
+	waitFor(ctx, func() bool {
+		// Get the restart annotation from the deployment's pod template.
+		out, err := k8sClient("get", "deployment", deploymentName, "-n", sc.namespace,
+			"-o", fmt.Sprintf("jsonpath={.spec.template.metadata.annotations['%s']}", restartAnnotationKey))
+		if err != nil {
+			return false
+		}
+
+		// If the annotation exists and has a value, it persisted.
+		return out != ""
+	})
+
+	return nil
+}
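A presence check on the annotation is usually enough. A stricter variant, sketched below under the same assumptions as the previous sketch (a client-go `kubernetes.Interface` client and the assumed `"kubectl-rollout"` manager name), would confirm via `metadata.managedFields` that the annotation is still owned by kubectl rather than having been adopted or cleared by the controller.

```go
// Illustrative sketch only: verify, via managedFields, that the restart annotation is still
// owned by kubectl's field manager ("kubectl-rollout" is assumed here) after the OLM
// controller's Server-Side Apply has run.
package sketch

import (
	"context"
	"strings"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func restartAnnotationOwnedByKubectl(ctx context.Context, cs kubernetes.Interface, namespace, name string) (bool, error) {
	dep, err := cs.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return false, err
	}
	for _, mf := range dep.ManagedFields {
		if mf.Manager != "kubectl-rollout" || mf.FieldsV1 == nil {
			continue
		}
		// FieldsV1.Raw is the JSON encoding of the field paths this manager owns; a
		// substring check is crude but sufficient for an illustration.
		if strings.Contains(string(mf.FieldsV1.Raw), "kubectl.kubernetes.io/restartedAt") {
			return true, nil
		}
	}
	return false, nil
}
```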