To force delete a namespace stuck in Terminating
Use the command below without any changes; it works like a charm.
NS=$(kubectl get ns | grep Terminating | awk 'NR==1 {print $1}') && kubectl get namespace "$NS" -o json | tr -d "\n" | sed "s/\"finalizers\": \[[^]]\+\]/\"finalizers\": []/" | kubectl replace --raw /api/v1/namespaces/$NS/finalize -f -
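If more than one namespace is stuck, the same finalizer-stripping trick can be looped over every Terminating namespace (a sketch of the same approach, not required if only one namespace is affected):
# Strip finalizers from every namespace currently in Terminating
for NS in $(kubectl get ns --no-headers | awk '$2=="Terminating" {print $1}'); do
  kubectl get namespace "$NS" -o json | tr -d "\n" | sed "s/\"finalizers\": \[[^]]\+\]/\"finalizers\": []/" | kubectl replace --raw "/api/v1/namespaces/$NS/finalize" -f -
done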
Next, clean up the cluster-scoped resources left behind by the GPU Operator:
kubectl get ClusterRole
kubectl delete ClusterRole gpu-operator gpu-operator-1715932386-node-feature-discovery gpu-operator-1715932386-node-feature-discovery-gc nvidia-device-plugin nvidia-device-plugin-mps-control-daemon nvidia-driver nvidia-gpu-feature-discovery nvidia-mig-manager nvidia-operator-validator
kubectl get ClusterRoleBinding
kubectl delete ClusterRoleBinding gpu-operator gpu-operator-1715932386-node-feature-discovery gpu-operator-1715932386-node-feature-discovery-gc nvidia-device-plugin nvidia-device-plugin-mps-control-daemon nvidia-driver nvidia-gpu-feature-discovery nvidia-mig-manager nvidia-operator-validator
kubectl get ClusterPolicy
kubectl delete ClusterPolicy cluster-policy
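If the ClusterPolicy CRD itself should also be removed (assuming the standard GPU Operator CRD name, clusterpolicies.nvidia.com), delete it as well:
kubectl delete crd clusterpolicies.nvidia.com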
Then uninstall the Helm release:
helm list -n gpu-operator
helm uninstall <release-name> -n gpu-operator
Force delete any pods stuck in Terminating and check what is left across namespaces:
kubectl delete pod NAME --grace-period=0 --force
kubectl get pods --all-namespaces -o wide
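Before reinstalling, make sure the nvidia chart repository is configured (this is NVIDIA's published Helm repo; skip if it is already added):
helm repo add nvidia https://helm.ngc.nvidia.com/nvidia && helm repo update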
Then reinstall the operator with the driver and container toolkit disabled (i.e., assuming both are already installed on the node):
helm install --wait --generate-name \
-n gpu-operator --create-namespace \
nvidia/gpu-operator \
--set driver.enabled=false \
--set toolkit.enabled=false
The same install without --wait:
helm install --generate-name \
-n gpu-operator --create-namespace \
nvidia/gpu-operator \
--set driver.enabled=false \
--set toolkit.enabled=false
Or disabling only the driver:
helm install --generate-name \
--namespace gpu-operator --create-namespace \
nvidia/gpu-operator \
--set driver.enabled=false
If the release needs to be removed again, it can be torn down in one step (it lives in the gpu-operator namespace, so pass -n):
helm delete -n gpu-operator $(helm list -n gpu-operator | grep gpu-operator | awk '{print $1}')
Check that the operator pods come up. Newer chart versions deploy into the gpu-operator namespace; older ones used gpu-operator-resources:
kubectl get pods -n gpu-operator
kubectl get pods -n gpu-operator-resources
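Once the pods are Running, a quick sanity check (a sketch, assuming the device plugin has registered) is to confirm the nodes advertise GPUs again:
kubectl describe nodes | grep -i "nvidia.com/gpu"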