Skip to content

Commit afc4510

Browse files
Merge pull request #605 from Tai-RedHat/coo-alerting-ui
COO-1015: Automate testing of ACM alerting UI
2 parents 062181b + f778c1a commit afc4510

8 files changed

Lines changed: 383 additions & 2 deletions

File tree

Lines changed: 58 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,58 @@
1+
// 02.acm_alerting_ui.cy.ts
2+
// E2E test for validating ACM Alerting UI integration with Cluster Observability Operator (COO)
3+
import '../../support/commands/auth-commands';
4+
import { nav } from '../../views/nav';
5+
import { acmAlertingPage } from '../../views/acm-alerting-page';
6+
7+
const MCP = {
8+
namespace: 'openshift-cluster-observability-operator',
9+
packageName: 'cluster-observability-operator',
10+
operatorName: 'Cluster Observability Operator',
11+
config: {
12+
kind: 'UIPlugin',
13+
name: 'monitoring',
14+
},
15+
};
16+
const MP = {
17+
namespace: 'openshift-monitoring',
18+
operatorName: 'Cluster Monitoring Operator',
19+
};
20+
const expectedAlerts = ['Watchdog', 'Watchdog-spoke', 'ClusterCPUHealth-jb'];
21+
22+
describe('ACM Alerting UI', () => {
23+
before(() => {
24+
cy.beforeBlockACM(MCP, MP);
25+
});
26+
27+
it('Navigate to Fleet Management > local-cluster > Observe > Alerting', () => {
28+
// wait for console page loading completed
29+
cy.visit('/');
30+
cy.get('body', { timeout: 60000 }).should('contain.text', 'Administrator');
31+
// switch to Fleet Management page
32+
cy.switchPerspective('Fleet Management');
33+
// close pop-up window
34+
cy.closeOnboardingModalIfPresent();
35+
// click “local-cluster” when visible
36+
cy.log('Waiting for local-cluster link to appear...');
37+
cy.contains('local-cluster', { timeout: 120000 })
38+
.should('exist')
39+
.should('be.visible')
40+
.then(($el) => {
41+
cy.wrap($el).click({ force: true });
42+
});
43+
// click side menu -> Observe -> Alerting
44+
nav.sidenav.clickNavLink(['Observe', 'Alerting']);
45+
// Wait for alert tab content to become visible
46+
cy.get('section#alerts-tab-content', { timeout: 60000 })
47+
.should('be.visible');
48+
// confirm Alerting page loading completed
49+
acmAlertingPage.shouldBeLoaded();
50+
// check three test alerts exist
51+
expectedAlerts.forEach((alert) => {
52+
cy.contains('a[data-test-id="alert-resource-link"]', alert, { timeout: 60000 })
53+
.should('be.visible');
54+
});
55+
cy.log('Verified all expected alerts are visible on the Alerting page');
56+
cy.log('ACM Alerting UI test completed successfully');
57+
});
58+
});
Lines changed: 42 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,42 @@
1+
# ConfigMap consumed by the ACM multicluster-observability Thanos Ruler.
# Defines three test alerts (Watchdog, Watchdog-spoke, ClusterCPUHealth-jb)
# that the ACM Alerting UI Cypress test expects to find on the Alerting page.
# NOTE(review): severity "warn" on Watchdog-spoke is non-standard (usual
# values are info/warning/critical) — confirm downstream routing expects it.
apiVersion: v1
kind: ConfigMap
metadata:
  name: thanos-ruler-custom-rules
  namespace: open-cluster-management-observability
data:
  custom_rules.yaml: |
    groups:
      - name: alertrule-testing
        rules:
          - alert: Watchdog
            annotations:
              summary: An alert that should always be firing to certify that Alertmanager is working properly.
              description: This is an alert meant to ensure that the entire alerting pipeline is functional.
            expr: vector(1)
            labels:
              instance: "local"
              cluster: "local"
              clusterID: "111111111"
              severity: info
          - alert: Watchdog-spoke
            annotations:
              summary: An alert that should always be firing to certify that Alertmanager is working properly.
              description: This is an alert meant to ensure that the entire alerting pipeline is functional.
            expr: vector(1)
            labels:
              instance: "spoke"
              cluster: "spoke"
              clusterID: "22222222"
              severity: warn
      - name: cluster-health
        rules:
          - alert: ClusterCPUHealth-jb
            annotations:
              summary: Notify when CPU utilization on a cluster is greater than the defined utilization limit
              description: "The cluster has a high CPU usage: core for"
            expr: |
              max(cluster:cpu_usage_cores:sum) by (clusterID, cluster, prometheus) > 0
            labels:
              cluster: "{{ $labels.cluster }}"
              prometheus: "{{ $labels.prometheus }}"
              severity: critical
Lines changed: 172 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,172 @@
1+
#!/bin/bash
# Install Advanced Cluster Management (ACM) and MultiClusterObservability (MCO)
# with a MinIO-backed S3 object store, for the ACM Alerting UI e2e tests.
set -eux

# Allow workloads to schedule on control-plane nodes (compact test clusters).
oc patch Scheduler cluster --type='json' -p '[{ "op": "replace", "path": "/spec/mastersSchedulable", "value": true }]'

# Namespace and OperatorGroup for the ACM operator.
oc apply -f - <<EOF
apiVersion: v1
kind: Namespace
metadata:
  name: open-cluster-management
---
apiVersion: operators.coreos.com/v1
kind: OperatorGroup
metadata:
  namespace: open-cluster-management
  name: og-global
  labels:
    og_label: open-cluster-management
spec:
  targetNamespaces:
    - open-cluster-management
  upgradeStrategy: Default
EOF

# Subscribe to the ACM operator from the redhat-operators catalog.
oc apply -f - <<EOF
---
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  labels:
    operators.coreos.com/advanced-cluster-management.open-cluster-management: ""
  name: advanced-cluster-management
  namespace: open-cluster-management
spec:
  installPlanApproval: Automatic
  name: advanced-cluster-management
  source: redhat-operators
  sourceNamespace: openshift-marketplace
---
EOF

# Poll until the operator deployment exists and rolls out (up to ~5 minutes).
tries=30
while [[ $tries -gt 0 ]] &&
  ! oc -n open-cluster-management rollout status deploy/multiclusterhub-operator; do
  sleep 10
  ((tries--))
done
oc wait -n open-cluster-management --for=condition=Available deploy/multiclusterhub-operator --timeout=300s

# Create the MultiClusterHub with default settings.
oc apply -f - <<EOF
apiVersion: operator.open-cluster-management.io/v1
kind: MultiClusterHub
metadata:
  name: multiclusterhub
  namespace: open-cluster-management
spec: {}
EOF
# MCH components take several minutes to be created; wait before checking them.
sleep 5m
oc wait -n open-cluster-management --for=condition=Available deploy/search-api --timeout=300s
oc wait -n open-cluster-management --for=condition=Available deploy/search-collector --timeout=300s
oc wait -n open-cluster-management --for=condition=Available deploy/search-indexer --timeout=300s
oc -n open-cluster-management get pod

# Create the observability namespace (idempotent).
if ! oc get ns open-cluster-management-observability >/dev/null 2>&1; then
  echo "[INFO] Creating namespace open-cluster-management-observability"
  oc create ns open-cluster-management-observability
else
  echo "[INFO] Namespace open-cluster-management-observability already exists"
fi

# MinIO deployment used as the S3-compatible metrics object store.
oc apply -f - <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: minio
  namespace: open-cluster-management-observability
  labels:
    app.kubernetes.io/name: minio
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: minio
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app.kubernetes.io/name: minio
    spec:
      containers:
        - command:
            - /bin/sh
            - -c
            - mkdir -p /storage/thanos && /usr/bin/minio server /storage
          env:
            - name: MINIO_ACCESS_KEY
              value: minio
            - name: MINIO_SECRET_KEY
              value: minio123
          image: quay.io/minio/minio:RELEASE.2021-08-25T00-41-18Z
          name: minio
          ports:
            - containerPort: 9000
              protocol: TCP
          volumeMounts:
            - mountPath: /storage
              name: storage
      volumes:
        - name: storage
          persistentVolumeClaim:
            claimName: minio
EOF

# PVC backing the MinIO storage volume.
oc apply -f - <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  labels:
    app.kubernetes.io/name: minio
  name: minio
  namespace: open-cluster-management-observability
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: "1Gi"
EOF

# Thanos object-storage secret referenced by MultiClusterObservability.
oc apply -f - <<EOF
apiVersion: v1
stringData:
  thanos.yaml: |
    type: s3
    config:
      bucket: "thanos"
      endpoint: "minio:9000"
      insecure: true
      access_key: "minio"
      secret_key: "minio123"
kind: Secret
metadata:
  name: thanos-object-storage
  namespace: open-cluster-management-observability
type: Opaque
EOF

# ClusterIP service exposing MinIO inside the cluster.
oc apply -f - <<EOF
apiVersion: v1
kind: Service
metadata:
  name: minio
  namespace: open-cluster-management-observability
spec:
  ports:
    - port: 9000
      protocol: TCP
      targetPort: 9000
  selector:
    app.kubernetes.io/name: minio
  type: ClusterIP
EOF
oc wait -n open-cluster-management-observability --for=condition=Available deploy/minio --timeout=300s

# Create the MultiClusterObservability instance pointing at the MinIO secret.
oc apply -f - <<EOF
apiVersion: observability.open-cluster-management.io/v1beta2
kind: MultiClusterObservability
metadata:
  name: observability
spec:
  observabilityAddonSpec: {}
  storageConfig:
    metricObjectStorage:
      name: thanos-object-storage
      key: thanos.yaml
EOF
# Give the observability pods time to be created before waiting on them.
sleep 1m
oc wait --for=condition=Ready pod -l alertmanager=observability,app=multicluster-observability-alertmanager -n open-cluster-management-observability --timeout=300s
oc -n open-cluster-management-observability get pod
oc -n open-cluster-management-observability get svc | grep -E 'alertmanager|rbac-query'
Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,13 @@
1+
# COO UIPlugin enabling the ACM (fleet) monitoring view in the console.
# Points the plugin at the ACM observability Alertmanager and the
# rbac-query-proxy services on the hub cluster.
apiVersion: observability.openshift.io/v1alpha1
kind: UIPlugin
metadata:
  name: monitoring
spec:
  monitoring:
    acm:
      enabled: true
      alertmanager:
        url: 'https://alertmanager.open-cluster-management-observability.svc:9095'
      thanosQuerier:
        url: 'https://rbac-query-proxy.open-cluster-management-observability.svc:8443'
  type: Monitoring
Lines changed: 45 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,45 @@
1+
#!/bin/bash
# Uninstall the ACM test resources created by acm-install.sh.
# This script is run manually and is NOT executed automatically with the tests.
set -eux
echo "[ACM Uninstall] Using KUBECONFIG=${KUBECONFIG:-~/.kube/config}"

# --- Step 1: delete MultiClusterObservability ---
echo "[ACM Uninstall] Deleting MultiClusterObservability (MCO)..."
oc delete MultiClusterObservability observability -n open-cluster-management-observability --ignore-not-found=true

# --- Step 2: delete MinIO deployment, PVC, Secret and Service ---
echo "[ACM Uninstall] Cleaning up MinIO and related resources..."
oc delete deploy minio -n open-cluster-management-observability --ignore-not-found=true
oc delete pvc minio -n open-cluster-management-observability --ignore-not-found=true
oc delete secret thanos-object-storage -n open-cluster-management-observability --ignore-not-found=true
oc delete svc minio -n open-cluster-management-observability --ignore-not-found=true

# --- Step 3: delete MultiClusterHub ---
echo "[ACM Uninstall] Deleting MultiClusterHub..."
oc delete MultiClusterHub multiclusterhub -n open-cluster-management --ignore-not-found=true

# Wait until the MultiClusterHub is fully removed (best effort).
echo "[ACM Uninstall] Waiting for MultiClusterHub cleanup..."
oc wait MultiClusterHub multiclusterhub -n open-cluster-management --for=delete --timeout=300s || true

# --- Step 4: delete Subscription and OperatorGroup ---
echo "[ACM Uninstall] Deleting ACM Operator Subscription & OperatorGroup..."
oc delete sub advanced-cluster-management -n open-cluster-management --ignore-not-found=true
oc delete og og-global -n open-cluster-management --ignore-not-found=true

# --- Step 5: delete namespaces ---
echo "[ACM Uninstall] Deleting ACM-related namespaces..."
oc delete ns open-cluster-management-observability --ignore-not-found=true
oc delete ns open-cluster-management --ignore-not-found=true

# --- Step 6: clean up CRDs (optional) ---
# echo "[ACM Uninstall] Cleaning up CRDs (optional cleanup)..."
# oc delete crd multiclusterhubs.operator.open-cluster-management.io --ignore-not-found=true
# oc delete crd multiclusterobservabilities.observability.open-cluster-management.io --ignore-not-found=true

# --- Step 7: remove stuck namespace finalizers (optional) ---
# echo "[ACM Uninstall] Removing potential finalizers..."
# oc get ns open-cluster-management-observability -o json | jq '.spec.finalizers=[]' | oc replace --raw "/api/v1/namespaces/open-cluster-management-observability/finalize" -f - || true
# oc get ns open-cluster-management -o json | jq '.spec.finalizers=[]' | oc replace --raw "/api/v1/namespaces/open-cluster-management/finalize" -f - || true

echo "[ACM Uninstall] ✅ Completed cleanup."

web/cypress/support/commands/operator-commands.ts

Lines changed: 36 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,8 @@ declare global {
2020
cleanupCOO(MCP: { namespace: string, operatorName: string, packageName: string }, MP: { namespace: string, operatorName: string});
2121
RemoveClusterAdminRole();
2222
setupCOO(MCP: { namespace: string, operatorName: string, packageName: string }, MP: { namespace: string, operatorName: string });
23+
beforeBlockACM( MCP: { namespace: string; operatorName: string; packageName: string }, MP: { namespace: string; operatorName: string },): Chainable<void>;
24+
closeOnboardingModalIfPresent(): Chainable<void>;
2325
}
2426
}
2527
}
@@ -567,4 +569,37 @@ Cypress.Commands.add('beforeBlock', (MP: { namespace: string, operatorName: stri
567569
cy.log('Remove cluster-admin role from user.');
568570
operatorUtils.RemoveClusterAdminRole();
569571
cy.log('Remove cluster-admin role from user completed');
570-
});
572+
});
573+
574+
Cypress.Commands.add('beforeBlockACM', (MCP, MP) => {
575+
cy.beforeBlockCOO(MCP, MP);
576+
cy.log('=== [Setup] Installing ACM Operator & MCO ===');
577+
cy.exec('bash ./cypress/fixtures/coo/acm-install.sh', {
578+
env: { KUBECONFIG: Cypress.env('KUBECONFIG_PATH'), },
579+
failOnNonZeroExit: false,
580+
timeout: 1200000, // long time script
581+
});
582+
cy.exec(`oc apply -f ./cypress/fixtures/coo/acm-uiplugin.yaml --kubeconfig ${Cypress.env('KUBECONFIG_PATH')}`);
583+
// add example alerts for test
584+
cy.exec(`oc apply -f ./cypress/fixtures/coo/acm-alerrule-test.yaml --kubeconfig ${Cypress.env('KUBECONFIG_PATH')}`);
585+
cy.log('ACM environment setup completed');
586+
});
587+
588+
Cypress.Commands.add('closeOnboardingModalIfPresent', () => {
589+
cy.get('body').then(($body) => {
590+
const modalSelector = 'button[data-ouia-component-id="clustersOnboardingModal-ModalBoxCloseButton"]';
591+
if ($body.find(modalSelector).length > 0) {
592+
cy.log('Onboarding modal detected, attempting to close...');
593+
cy.get(modalSelector, { timeout: 20000 })
594+
.should('be.visible')
595+
.should('not.be.disabled')
596+
.click({ force: true });
597+
598+
cy.get(modalSelector, { timeout: 10000 })
599+
.should('not.exist')
600+
.then(() => cy.log('Modal successfully closed'));
601+
} else {
602+
cy.log('No onboarding modal found');
603+
}
604+
});
605+
});

web/cypress/support/index.ts

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,8 +20,13 @@ Cypress.on('uncaught:exception', (err) => {
2020
message.includes('ResizeObserver loop limit exceeded') ||
2121
message.includes('ResizeObserver loop completed with undelivered notifications') ||
2222
message.includes('ResizeObserver') ||
23-
message.includes('Cannot read properties of undefined')
23+
message.includes('Cannot read properties of undefined') ||
24+
message.includes('Unauthorized') ||
25+
message.includes('Bad Gateway') ||
26+
message.includes(`Cannot read properties of null (reading 'default')`) ||
27+
message.includes(`(intermediate value) is not a function`)
2428
) {
29+
console.warn('Ignored frontend exception:', err.message);
2530
return false;
2631
}
2732
// allow other errors to fail the test

0 commit comments

Comments
 (0)